import cinder.policy
from cinder import utils
import cinder.volume
+from cinder.volume import utils as volume_utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
if volume['status'] != "available":
msg = _('Volume to be backed up must be available')
raise exception.InvalidVolume(reason=msg)
- volume_host = volume['host'].partition('@')[0]
+ volume_host = volume_utils.extract_host(volume['host'], 'host')
if not self._is_backup_service_enabled(volume, volume_host):
raise exception.ServiceNotFound(service_id='cinder-backup')
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
from cinder import utils
+from cinder.volume import utils as volume_utils
LOG = logging.getLogger(__name__)
LOG.info(_("Cleaning up incomplete backup operations."))
volumes = self.db.volume_get_all_by_host(ctxt, self.host)
for volume in volumes:
- backend = self._get_volume_backend(host=volume['host'])
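+ # The volume's host may now include pool information
+ # ('host@backend#pool'); use only the backend portion to look up
+ # the backup backend.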
+ volume_host = volume_utils.extract_host(volume['host'], 'backend')
+ backend = self._get_volume_backend(host=volume_host)
if volume['status'] == 'backing-up':
LOG.info(_('Resetting volume %s to available '
'(was backing-up).') % volume['id'])
LOG.info(_('Create backup started, backup: %(backup_id)s '
'volume: %(volume_id)s.') %
{'backup_id': backup_id, 'volume_id': volume_id})
- backend = self._get_volume_backend(host=volume['host'])
+ volume_host = volume_utils.extract_host(volume['host'], 'backend')
+ backend = self._get_volume_backend(host=volume_host)
self.db.backup_update(context, backup_id, {'host': self.host,
'service':
backup = self.db.backup_get(context, backup_id)
volume = self.db.volume_get(context, volume_id)
- backend = self._get_volume_backend(host=volume['host'])
+ volume_host = volume_utils.extract_host(volume['host'], 'backend')
+ backend = self._get_volume_backend(host=volume_host)
self.db.backup_update(context, backup_id, {'host': self.host})
@require_admin_context
def volume_get_all_by_host(context, host):
- return _volume_get_query(context).filter_by(host=host).all()
+ """Retrieves all volumes hosted on a host."""
+ # As a side effect of the introduction of the pool-aware scheduler,
+ # newly created volumes will have pool information appended to the
+ # 'host' field of a volume record. So a volume record in the DB can
+ # now take either of the forms below:
+ # Host
+ # Host#Pool
+ if host and isinstance(host, basestring):
+ session = get_session()
+ with session.begin():
+ host_attr = getattr(models.Volume, 'host')
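+ # Match both the exact host ('Host') and any pool on it ('Host#Pool')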
+ conditions = [host_attr == host,
+ host_attr.op('LIKE')(host + '#%')]
+ result = _volume_get_query(context).filter(or_(*conditions)).all()
+ return result
+ elif not host:
+ return []
@require_admin_context
from cinder.openstack.common import log as logging
from cinder.scheduler import driver
from cinder.scheduler import scheduler_options
+from cinder.volume import utils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
if host_state.host == host:
return host_state
- msg = (_('cannot place volume %(id)s on %(host)s')
+ msg = (_('Cannot place volume %(id)s on %(host)s')
% {'id': request_spec['volume_id'], 'host': host})
raise exception.NoValidHost(reason=msg)
if host_state.host == current_host:
return host_state
+ if utils.extract_host(current_host, 'pool') is None:
+ # Legacy volumes created before pools were introduced have no
+ # pool info in 'host', while host_state.host always includes
+ # pool-level info. If the exact match above didn't work out,
+ # look for a host_state on the same backend as the volume
+ # being retyped. In other words, for legacy volumes retyping
+ # may move the volume between pools on the same backend, which
+ # we consider different from migration between hosts, so it is
+ # allowed even when the migration policy is 'never'.
+ for weighed_host in weighed_hosts:
+ host_state = weighed_host.obj
+ backend = utils.extract_host(host_state.host, 'backend')
+ if backend == current_host:
+ return host_state
+
if migration_policy == 'never':
msg = (_('Current host not valid for volume %(id)s with type '
'%(type)s, migration not allowed')
from cinder.openstack.common.scheduler import weights
from cinder.openstack.common import timeutils
from cinder import utils
+from cinder.volume import utils as vol_utils
host_manager_opts = [
class HostState(object):
- """Mutable and immutable information tracked for a host."""
+ """Mutable and immutable information tracked for a volume backend."""
def __init__(self, host, capabilities=None, service=None):
+ self.capabilities = None
+ self.service = None
self.host = host
self.update_capabilities(capabilities, service)
self.free_capacity_gb = None
self.reserved_percentage = 0
+ # PoolState for all pools
+ self.pools = {}
+
self.updated = None
def update_capabilities(self, capabilities=None, service=None):
service = {}
self.service = ReadOnlyDict(service)
- def update_from_volume_capability(self, capability):
- """Update information about a host from its volume_node info."""
+ def update_from_volume_capability(self, capability, service=None):
+ """Update information about a host from its volume_node info.
+
+ 'capability' is the status info reported by the volume backend; a
+ typical capability looks like this:
+
+ capability = {
+ 'volume_backend_name': 'Local iSCSI', #\
+ 'vendor_name': 'OpenStack', # backend level
+ 'driver_version': '1.0', # mandatory/fixed
+ 'storage_protocol': 'iSCSI', #- stats&capabilities
+
+ 'active_volumes': 10, #\
+ 'IOPS_provisioned': 30000, # optional custom
+ 'fancy_capability_1': 'eat', # stats & capabilities
+ 'fancy_capability_2': 'drink', #/
+
+ 'pools': [
+ {'pool_name': '1st pool', #\
+ 'total_capacity_gb': 500, # mandatory stats for
+ 'free_capacity_gb': 230, # pools
+ 'allocated_capacity_gb': 270, # |
+ 'QoS_support': 'False', # |
+ 'reserved_percentage': 0, #/
+
+ 'dying_disks': 100, #\
+ 'super_hero_1': 'spider-man', # optional custom
+ 'super_hero_2': 'flash', # stats & capabilities
+ 'super_hero_3': 'neoncat' #/
+ },
+ {'pool_name': '2nd pool',
+ 'total_capacity_gb': 1024,
+ 'free_capacity_gb': 1024,
+ 'allocated_capacity_gb': 0,
+ 'QoS_support': 'False',
+ 'reserved_percentage': 0,
+
+ 'dying_disks': 200,
+ 'super_hero_1': 'superman',
+ 'super_hero_2': ' ',
+ 'super_hero_3': 'Hulk',
+ }
+ ]
+ }
+ """
+ self.update_capabilities(capability, service)
+
if capability:
if self.updated and self.updated > capability['timestamp']:
return
- self.volume_backend = capability.get('volume_backend_name', None)
- self.vendor_name = capability.get('vendor_name', None)
- self.driver_version = capability.get('driver_version', None)
- self.storage_protocol = capability.get('storage_protocol', None)
- self.QoS_support = capability.get('QoS_support', False)
+ # Update backend level info
+ self.update_backend(capability)
- self.total_capacity_gb = capability['total_capacity_gb']
- self.free_capacity_gb = capability['free_capacity_gb']
- self.allocated_capacity_gb = capability.get(
- 'allocated_capacity_gb', 0)
- self.reserved_percentage = capability['reserved_percentage']
+ # Update pool level info
+ self.update_pools(capability, service)
- self.updated = capability['timestamp']
+ def update_pools(self, capability, service):
+ """Update storage pools information from backend reported info."""
+ if not capability:
+ return
+
+ pools = capability.get('pools', None)
+ active_pools = set()
+ if pools and isinstance(pools, list):
+ # Update stats for all pools according to the list of pools
+ # reported in the volume capability
+ for pool_cap in pools:
+ pool_name = pool_cap['pool_name']
+ self._append_backend_info(pool_cap)
+ cur_pool = self.pools.get(pool_name, None)
+ if not cur_pool:
+ # Add new pool
+ cur_pool = PoolState(self.host, pool_cap, pool_name)
+ self.pools[pool_name] = cur_pool
+ cur_pool.update_from_volume_capability(pool_cap, service)
+
+ active_pools.add(pool_name)
+ elif pools is None:
+ # To handle legacy drivers that don't report pool
+ # information in the capability, prepare a pool from the
+ # backend-level info, or update the one we already
+ # created in self.pools.
+ pool_name = self.volume_backend_name
+ if pool_name is None:
+ # To get DEFAULT_POOL_NAME
+ pool_name = vol_utils.extract_host(self.host, 'pool', True)
+
+ if len(self.pools) == 0:
+ # No pool was there
+ single_pool = PoolState(self.host, capability, pool_name)
+ self._append_backend_info(capability)
+ self.pools[pool_name] = single_pool
+ else:
+ # This is an update from a legacy driver
+ try:
+ single_pool = self.pools[pool_name]
+ except KeyError:
+ single_pool = PoolState(self.host, capability, pool_name)
+ self._append_backend_info(capability)
+ self.pools[pool_name] = single_pool
+
+ single_pool.update_from_volume_capability(capability, service)
+ active_pools.add(pool_name)
+
+ # remove non-active pools from self.pools
+ nonactive_pools = set(self.pools.keys()) - active_pools
+ for pool in nonactive_pools:
+ LOG.debug("Removing non-active pool %(pool)s @ %(host)s "
+ "from scheduler cache." % {'pool': pool,
+ 'host': self.host})
+ del self.pools[pool]
+
+ def _append_backend_info(self, pool_cap):
+ # Fill backend-level info into the pool if needed.
+ if not pool_cap.get('volume_backend_name', None):
+ pool_cap['volume_backend_name'] = self.volume_backend_name
+
+ if not pool_cap.get('storage_protocol', None):
+ pool_cap['storage_protocol'] = self.storage_protocol
+
+ if not pool_cap.get('vendor_name', None):
+ pool_cap['vendor_name'] = self.vendor_name
+
+ if not pool_cap.get('driver_version', None):
+ pool_cap['driver_version'] = self.driver_version
+
+ if not pool_cap.get('timestamp', None):
+ pool_cap['timestamp'] = self.updated
+
+ def update_backend(self, capability):
+ self.volume_backend_name = capability.get('volume_backend_name', None)
+ self.vendor_name = capability.get('vendor_name', None)
+ self.driver_version = capability.get('driver_version', None)
+ self.storage_protocol = capability.get('storage_protocol', None)
+ self.updated = capability['timestamp']
def consume_from_volume(self, volume):
"""Incrementally update host state from an volume."""
self.updated = timeutils.utcnow()
def __repr__(self):
- return ("host '%s': free_capacity_gb: %s" %
- (self.host, self.free_capacity_gb))
+ # FIXME(zhiteng) Backend-level free_capacity_gb isn't as
+ # meaningful as it was before pools were introduced; we should
+ # come up with a better representation of HostState.
+ return ("host '%s': free_capacity_gb: %s, pools: %s" %
+ (self.host, self.free_capacity_gb, self.pools))
+
+
+class PoolState(HostState):
+ def __init__(self, host, capabilities, pool_name):
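+ # Represent the pool as a host of the form 'host#pool_name' so it
+ # can be filtered and weighed like any other backend.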
+ new_host = vol_utils.append_host(host, pool_name)
+ super(PoolState, self).__init__(new_host, capabilities)
+ self.pool_name = pool_name
+ # No pools within a pool
+ self.pools = None
+
+ def update_from_volume_capability(self, capability, service=None):
+ """Update information about a pool from its volume_node info."""
+ self.update_capabilities(capability, service)
+ if capability:
+ if self.updated and self.updated > capability['timestamp']:
+ return
+ self.update_backend(capability)
+
+ self.total_capacity_gb = capability['total_capacity_gb']
+ self.free_capacity_gb = capability['free_capacity_gb']
+ self.allocated_capacity_gb = capability.get(
+ 'allocated_capacity_gb', 0)
+ self.QoS_support = capability.get('QoS_support', False)
+ self.reserved_percentage = capability['reserved_percentage']
+
+ def update_pools(self, capability):
+ # Do nothing, since we don't have pools within a pool, yet
+ pass
class HostManager(object):
{'service_name': service_name, 'host': host})
return
- LOG.debug("Received %(service_name)s service update from "
- "%(host)s." %
- {'service_name': service_name, 'host': host})
-
# Copy the capabilities, so we don't modify the original dict
capab_copy = dict(capabilities)
capab_copy["timestamp"] = timeutils.utcnow() # Reported time
self.service_states[host] = capab_copy
+ LOG.debug("Received %(service_name)s service update from "
+ "%(host)s: %(cap)s" %
+ {'service_name': service_name, 'host': host,
+ 'cap': capabilities})
+
def get_all_host_states(self, context):
"""Returns a dict of all the hosts the HostManager knows about.
continue
capabilities = self.service_states.get(host, None)
host_state = self.host_state_map.get(host)
- if host_state:
- # copy capabilities to host_state.capabilities
- host_state.update_capabilities(capabilities,
- dict(service.iteritems()))
- else:
+ if not host_state:
host_state = self.host_state_cls(host,
capabilities=capabilities,
service=
dict(service.iteritems()))
self.host_state_map[host] = host_state
- # update attributes in host_state that scheduler is interested in
- host_state.update_from_volume_capability(capabilities)
+ # update capabilities and attributes in host_state
+ host_state.update_from_volume_capability(capabilities,
+ service=
+ dict(service.iteritems()))
active_hosts.add(host)
# remove non-active hosts from host_state_map
"scheduler cache.") % {'host': host})
del self.host_state_map[host]
- return self.host_state_map.itervalues()
+ # build a pool_state map and return that map instead of host_state_map
+ all_pools = {}
+ for host in active_hosts:
+ state = self.host_state_map[host]
+ for key in state.pools:
+ pool = state.pools[key]
+ # use host.pool_name to make sure key is unique
+ pool_key = '.'.join([host, pool.pool_name])
+ all_pools[pool_key] = pool
+
+ return all_pools.itervalues()
consistencygroup['description'] = description
consistencygroup['volume_type_id'] = volume_type_id
consistencygroup['status'] = status
+ consistencygroup['host'] = 'fakehost'
return db.consistencygroup_create(
context.get_admin_context(),
consistencygroup)['id']
size=1):
"""Create a volume object."""
vol = {}
+ vol['host'] = 'fake_host'
vol['size'] = size
vol['user_id'] = 'fake'
vol['project_id'] = 'fake'
'volume_backend_name': 'lvm4',
'timestamp': None,
'consistencygroup_support': True},
+ 'host5': {'total_capacity_gb': 2048,
+ 'free_capacity_gb': 500,
+ 'allocated_capacity_gb': 1548,
+ 'reserved_percentage': 5,
+ 'timestamp': None},
}
availability_zone='zone2', updated_at=timeutils.utcnow()),
dict(id=4, host='host4', topic='volume', disabled=False,
availability_zone='zone3', updated_at=timeutils.utcnow()),
- # service on host5 is disabled
- dict(id=5, host='host5', topic='volume', disabled=True,
- availability_zone='zone4', updated_at=timeutils.utcnow()),
+ dict(id=5, host='host5', topic='volume', disabled=False,
+ availability_zone='zone3', updated_at=timeutils.utcnow()),
]
if disabled is None:
mock_obj.return_value = services
from cinder.scheduler.weights.capacity import AllocatedCapacityWeigher as ACW
from cinder import test
from cinder.tests.scheduler import fakes
+from cinder.volume import utils
CONF = cfg.CONF
# so, host1 should win:
weighed_host = self._get_weighed_host(hostinfo_list)
self.assertEqual(weighed_host.weight, 0)
- self.assertEqual(weighed_host.obj.host, 'host1')
+ self.assertEqual(
+ utils.extract_host(weighed_host.obj.host), 'host1')
def test_capacity_weight_multiplier1(self):
self.flags(allocated_capacity_weight_multiplier=1.0)
# so, host4 should win:
weighed_host = self._get_weighed_host(hostinfo_list)
self.assertEqual(weighed_host.weight, 1848.0)
- self.assertEqual(weighed_host.obj.host, 'host4')
+ self.assertEqual(
+ utils.extract_host(weighed_host.obj.host), 'host4')
def test_capacity_weight_multiplier2(self):
self.flags(allocated_capacity_weight_multiplier=-2.0)
# so, host1 should win:
weighed_host = self._get_weighed_host(hostinfo_list)
self.assertEqual(weighed_host.weight, 0)
- self.assertEqual(weighed_host.obj.host, 'host1')
+ self.assertEqual(
+ utils.extract_host(weighed_host.obj.host), 'host1')
from cinder.scheduler.weights.capacity import CapacityWeigher
from cinder import test
from cinder.tests.scheduler import fakes
+from cinder.volume import utils
CONF = cfg.CONF
# so, host1 should win:
weighed_host = self._get_weighed_host(hostinfo_list)
self.assertEqual(weighed_host.weight, 921.0)
- self.assertEqual(weighed_host.obj.host, 'host1')
+ self.assertEqual(
+ utils.extract_host(weighed_host.obj.host), 'host1')
def test_capacity_weight_multiplier1(self):
self.flags(capacity_weight_multiplier=-1.0)
# so, host4 should win:
weighed_host = self._get_weighed_host(hostinfo_list)
self.assertEqual(weighed_host.weight, -190.0)
- self.assertEqual(weighed_host.obj.host, 'host4')
+ self.assertEqual(
+ utils.extract_host(weighed_host.obj.host), 'host4')
def test_capacity_weight_multiplier2(self):
self.flags(capacity_weight_multiplier=2.0)
# so, host1 should win:
weighed_host = self._get_weighed_host(hostinfo_list)
self.assertEqual(weighed_host.weight, 921.0 * 2)
- self.assertEqual(weighed_host.obj.host, 'host1')
+ self.assertEqual(
+ utils.extract_host(weighed_host.obj.host), 'host1')
from cinder.scheduler import host_manager
from cinder.tests.scheduler import fakes
from cinder.tests.scheduler import test_scheduler
+from cinder.volume import utils
class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
'volume_type': {'name': 'LVM_iSCSI'},
'volume_properties': {'project_id': 1,
'size': 1}}
- ret_host = sched.host_passes_filters(ctx, 'host1', request_spec, {})
- self.assertEqual(ret_host.host, 'host1')
+ ret_host = sched.host_passes_filters(ctx, 'host1#lvm1',
+ request_spec, {})
+ self.assertEqual(utils.extract_host(ret_host.host), 'host1')
+ self.assertTrue(_mock_service_get_topic.called)
+
+ @mock.patch('cinder.db.service_get_all_by_topic')
+ def test_host_passes_filters_default_pool_happy_day(
+ self, _mock_service_get_topic):
+ """Do a successful pass through of with host_passes_filters()."""
+ sched, ctx = self._host_passes_filters_setup(
+ _mock_service_get_topic)
+ request_spec = {'volume_id': 1,
+ 'volume_type': {'name': 'LVM_iSCSI'},
+ 'volume_properties': {'project_id': 1,
+ 'size': 1}}
+ ret_host = sched.host_passes_filters(ctx, 'host5#_pool0',
+ request_spec, {})
+ self.assertEqual(utils.extract_host(ret_host.host), 'host5')
self.assertTrue(_mock_service_get_topic.called)
@mock.patch('cinder.db.service_get_all_by_topic')
'size': 1024}}
self.assertRaises(exception.NoValidHost,
sched.host_passes_filters,
- ctx, 'host1', request_spec, {})
+ ctx, 'host1#lvm1', request_spec, {})
self.assertTrue(_mock_service_get_topic.called)
@mock.patch('cinder.db.service_get_all_by_topic')
'extra_specs': extra_specs},
'volume_properties': {'project_id': 1,
'size': 200,
- 'host': 'host4'}}
+ 'host': 'host4#lvm4'}}
+ host_state = sched.find_retype_host(ctx, request_spec,
+ filter_properties={},
+ migration_policy='never')
+ self.assertEqual(utils.extract_host(host_state.host), 'host4')
+
+ @mock.patch('cinder.db.service_get_all_by_topic')
+ def test_retype_with_pool_policy_never_migrate_pass(
+ self, _mock_service_get_topic):
+ # Retype should pass if the current host passes filters and
+ # policy=never, even when the volume's host includes pool
+ # information. The volume already resides on host3, so its size
+ # should not be counted against that host twice.
+ sched, ctx = self._host_passes_filters_setup(
+ _mock_service_get_topic)
+ extra_specs = {'volume_backend_name': 'lvm3'}
+ request_spec = {'volume_id': 1,
+ 'volume_type': {'name': 'LVM_iSCSI',
+ 'extra_specs': extra_specs},
+ 'volume_properties': {'project_id': 1,
+ 'size': 200,
+ 'host': 'host3#lvm3'}}
host_state = sched.find_retype_host(ctx, request_spec,
filter_properties={},
migration_policy='never')
- self.assertEqual(host_state.host, 'host4')
+ self.assertEqual(host_state.host, 'host3#lvm3')
@mock.patch('cinder.db.service_get_all_by_topic')
def test_retype_policy_never_migrate_fail(self, _mock_service_get_topic):
host_state = sched.find_retype_host(ctx, request_spec,
filter_properties={},
migration_policy='on-demand')
- self.assertEqual(host_state.host, 'host1')
+ self.assertEqual(utils.extract_host(host_state.host), 'host1')
@mock.patch('cinder.db.service_get_all_by_topic')
def test_retype_policy_demand_migrate_fail(self, _mock_service_get_topic):
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ @mock.patch('cinder.utils.service_is_up')
+ def test_capacity_filter_current_host_passes(self, _mock_serv_is_up):
+ _mock_serv_is_up.return_value = True
+ filt_cls = self.class_map['CapacityFilter']()
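+ # The 100GB volume already exists on host1 ('vol_exists_on'), so the
+ # filter should pass even though only 10GB of free capacity is reported.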
+ filter_properties = {'size': 100, 'vol_exists_on': 'host1'}
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1',
+ {'free_capacity_gb': 10,
+ 'updated_at': None,
+ 'service': service})
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
@mock.patch('cinder.utils.service_is_up')
def test_capacity_filter_fails(self, _mock_serv_is_up):
_mock_serv_is_up.return_value = True
_mock_serv_is_up.return_value = True
filt_cls = self.class_map['DifferentBackendFilter']()
service = {'disabled': False}
- host = fakes.FakeHostState('host2',
+ host = fakes.FakeHostState('host1:pool0',
+ {'free_capacity_gb': '1000',
+ 'updated_at': None,
+ 'service': service})
+ volume = utils.create_volume(self.context, host='host1:pool1')
+ vol_id = volume.id
+
+ filter_properties = {'context': self.context.elevated(),
+ 'scheduler_hints': {
+ 'different_host': [vol_id], }}
+
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
+ @mock.patch('cinder.utils.service_is_up')
+ def test_affinity_different_filter_legacy_volume_hint_passes(
+ self, _mock_serv_is_up):
+ _mock_serv_is_up.return_value = True
+ filt_cls = self.class_map['DifferentBackendFilter']()
+ service = {'disabled': False}
+ host = fakes.FakeHostState('host1:pool0',
{'free_capacity_gb': '1000',
'updated_at': None,
'service': service})
self.assertTrue(filt_cls.host_passes(host, filter_properties))
- def test_affinity_different_filter_no_list_passes(self):
+ def test_affinity_different_filter_non_list_fails(self):
filt_cls = self.class_map['DifferentBackendFilter']()
host = fakes.FakeHostState('host2', {})
volume = utils.create_volume(self.context, host='host2')
def test_affinity_different_filter_handles_multiple_uuids(self):
filt_cls = self.class_map['DifferentBackendFilter']()
- host = fakes.FakeHostState('host1', {})
- volume1 = utils.create_volume(self.context, host='host2')
+ host = fakes.FakeHostState('host1#pool0', {})
+ volume1 = utils.create_volume(self.context, host='host1:pool1')
vol_id1 = volume1.id
- volume2 = utils.create_volume(self.context, host='host3')
+ volume2 = utils.create_volume(self.context, host='host1:pool3')
vol_id2 = volume2.id
filter_properties = {'context': self.context.elevated(),
def test_affinity_same_filter_passes(self):
filt_cls = self.class_map['SameBackendFilter']()
- host = fakes.FakeHostState('host1', {})
- volume = utils.create_volume(self.context, host='host1')
+ host = fakes.FakeHostState('host1#pool0', {})
+ volume = utils.create_volume(self.context, host='host1#pool0')
vol_id = volume.id
filter_properties = {'context': self.context.elevated(),
self.assertTrue(filt_cls.host_passes(host, filter_properties))
+ def test_affinity_same_filter_legacy_vol_fails(self):
+ filt_cls = self.class_map['SameBackendFilter']()
+ host = fakes.FakeHostState('host1#pool0', {})
+ volume = utils.create_volume(self.context, host='host1')
+ vol_id = volume.id
+
+ filter_properties = {'context': self.context.elevated(),
+ 'scheduler_hints': {
+ 'same_host': [vol_id], }}
+
+ self.assertFalse(filt_cls.host_passes(host, filter_properties))
+
def test_affinity_same_filter_fails(self):
filt_cls = self.class_map['SameBackendFilter']()
- host = fakes.FakeHostState('host1', {})
- volume = utils.create_volume(self.context, host='host2')
+ host = fakes.FakeHostState('host1#pool0', {})
+ volume = utils.create_volume(self.context, host='host1#pool1')
vol_id = volume.id
filter_properties = {'context': self.context.elevated(),
self.assertFalse(filt_cls.host_passes(host, filter_properties))
+ def test_affinity_same_filter_vol_list_pass(self):
+ filt_cls = self.class_map['SameBackendFilter']()
+ host = fakes.FakeHostState('host1', {})
+ volume1 = utils.create_volume(self.context, host='host1')
+ vol_id1 = volume1.id
+ volume2 = utils.create_volume(self.context, host='host2')
+ vol_id2 = volume2.id
+
+ filter_properties = {'context': self.context.elevated(),
+ 'scheduler_hints': {
+ 'same_host': [vol_id1, vol_id2], }}
+
+ self.assertTrue(filt_cls.host_passes(host, filter_properties))
+
def test_affinity_same_filter_handles_none(self):
filt_cls = self.class_map['SameBackendFilter']()
host = fakes.FakeHostState('host1', {})
class HostStateTestCase(test.TestCase):
"""Test case for HostState class."""
- def test_update_from_volume_capability(self):
+ def test_update_from_volume_capability_nopool(self):
fake_host = host_manager.HostState('host1')
self.assertIsNone(fake_host.free_capacity_gb)
'timestamp': None}
fake_host.update_from_volume_capability(volume_capability)
- self.assertEqual(fake_host.free_capacity_gb, 512)
+ # Backend level stats remain uninitialized
+ self.assertEqual(fake_host.total_capacity_gb, 0)
+ self.assertEqual(fake_host.free_capacity_gb, None)
+ # Pool stats have been updated
+ self.assertEqual(fake_host.pools['_pool0'].total_capacity_gb, 1024)
+ self.assertEqual(fake_host.pools['_pool0'].free_capacity_gb, 512)
+
+ # Test update for existing host state
+ volume_capability.update(dict(total_capacity_gb=1000))
+ fake_host.update_from_volume_capability(volume_capability)
+ self.assertEqual(fake_host.pools['_pool0'].total_capacity_gb, 1000)
+
+ # Test update for existing host state with different backend name
+ volume_capability.update(dict(volume_backend_name='magic'))
+ fake_host.update_from_volume_capability(volume_capability)
+ self.assertEqual(fake_host.pools['magic'].total_capacity_gb, 1000)
+ self.assertEqual(fake_host.pools['magic'].free_capacity_gb, 512)
+ # '_pool0' becomes a non-active pool and is deleted
+ self.assertRaises(KeyError, lambda: fake_host.pools['_pool0'])
+
+ def test_update_from_volume_capability_with_pools(self):
+ fake_host = host_manager.HostState('host1')
+ self.assertIsNone(fake_host.free_capacity_gb)
+ capability = {
+ 'volume_backend_name': 'Local iSCSI',
+ 'vendor_name': 'OpenStack',
+ 'driver_version': '1.0.1',
+ 'storage_protocol': 'iSCSI',
+ 'pools': [
+ {'pool_name': '1st pool',
+ 'total_capacity_gb': 500,
+ 'free_capacity_gb': 230,
+ 'allocated_capacity_gb': 270,
+ 'QoS_support': 'False',
+ 'reserved_percentage': 0,
+ 'dying_disks': 100,
+ 'super_hero_1': 'spider-man',
+ 'super_hero_2': 'flash',
+ 'super_hero_3': 'neoncat',
+ },
+ {'pool_name': '2nd pool',
+ 'total_capacity_gb': 1024,
+ 'free_capacity_gb': 1024,
+ 'allocated_capacity_gb': 0,
+ 'QoS_support': 'False',
+ 'reserved_percentage': 0,
+ 'dying_disks': 200,
+ 'super_hero_1': 'superman',
+ 'super_hero_2': ' ',
+ 'super_hero_3': 'Hulk',
+ }
+ ],
+ 'timestamp': None,
+ }
+
+ fake_host.update_from_volume_capability(capability)
+
+ self.assertEqual(fake_host.volume_backend_name, 'Local iSCSI')
+ self.assertEqual(fake_host.storage_protocol, 'iSCSI')
+ self.assertEqual(fake_host.vendor_name, 'OpenStack')
+ self.assertEqual(fake_host.driver_version, '1.0.1')
+
+ # Backend level stats remain uninitialized
+ self.assertEqual(fake_host.total_capacity_gb, 0)
+ self.assertEqual(fake_host.free_capacity_gb, None)
+ # Pool stats have been updated
+ self.assertEqual(len(fake_host.pools), 2)
+
+ self.assertEqual(fake_host.pools['1st pool'].total_capacity_gb, 500)
+ self.assertEqual(fake_host.pools['1st pool'].free_capacity_gb, 230)
+ self.assertEqual(fake_host.pools['2nd pool'].total_capacity_gb, 1024)
+ self.assertEqual(fake_host.pools['2nd pool'].free_capacity_gb, 1024)
+
+ capability = {
+ 'volume_backend_name': 'Local iSCSI',
+ 'vendor_name': 'OpenStack',
+ 'driver_version': '1.0.2',
+ 'storage_protocol': 'iSCSI',
+ 'pools': [
+ {'pool_name': '3rd pool',
+ 'total_capacity_gb': 10000,
+ 'free_capacity_gb': 10000,
+ 'allocated_capacity_gb': 0,
+ 'QoS_support': 'False',
+ 'reserved_percentage': 0,
+ },
+ ],
+ 'timestamp': None,
+ }
+
+ # test update HostState Record
+ fake_host.update_from_volume_capability(capability)
+
+ self.assertEqual(fake_host.driver_version, '1.0.2')
+
+ # Non-active pool stats have been removed
+ self.assertEqual(len(fake_host.pools), 1)
+
+ self.assertRaises(KeyError, lambda: fake_host.pools['1st pool'])
+ self.assertRaises(KeyError, lambda: fake_host.pools['2nd pool'])
+
+ self.assertEqual(fake_host.pools['3rd pool'].total_capacity_gb, 10000)
+ self.assertEqual(fake_host.pools['3rd pool'].free_capacity_gb, 10000)
def test_update_from_volume_infinite_capability(self):
fake_host = host_manager.HostState('host1')
'timestamp': None}
fake_host.update_from_volume_capability(volume_capability)
- self.assertEqual(fake_host.total_capacity_gb, 'infinite')
- self.assertEqual(fake_host.free_capacity_gb, 'infinite')
+ # Backend level stats remain uninitialized
+ self.assertEqual(fake_host.total_capacity_gb, 0)
+ self.assertEqual(fake_host.free_capacity_gb, None)
+ # Pool stats have been updated
+ self.assertEqual(fake_host.pools['_pool0'].total_capacity_gb,
+ 'infinite')
+ self.assertEqual(fake_host.pools['_pool0'].free_capacity_gb,
+ 'infinite')
def test_update_from_volume_unknown_capability(self):
fake_host = host_manager.HostState('host1')
'timestamp': None}
fake_host.update_from_volume_capability(volume_capability)
- self.assertEqual(fake_host.total_capacity_gb, 'infinite')
- self.assertEqual(fake_host.free_capacity_gb, 'unknown')
+ # Backend level stats remain uninitialized
+ self.assertEqual(fake_host.total_capacity_gb, 0)
+ self.assertEqual(fake_host.free_capacity_gb, None)
+ # Pool stats have been updated
+ self.assertEqual(fake_host.pools['_pool0'].total_capacity_gb,
+ 'infinite')
+ self.assertEqual(fake_host.pools['_pool0'].free_capacity_gb,
+ 'unknown')
+
+
+class PoolStateTestCase(test.TestCase):
+ """Test case for HostState class."""
+
+ def test_update_from_volume_capability(self):
+ fake_pool = host_manager.PoolState('host1', None, 'pool0')
+ self.assertIsNone(fake_pool.free_capacity_gb)
+
+ volume_capability = {'total_capacity_gb': 1024,
+ 'free_capacity_gb': 512,
+ 'reserved_percentage': 0,
+ 'timestamp': None,
+ 'cap1': 'val1',
+ 'cap2': 'val2'}
+
+ fake_pool.update_from_volume_capability(volume_capability)
+ self.assertEqual(fake_pool.host, 'host1#pool0')
+ self.assertEqual(fake_pool.pool_name, 'pool0')
+ self.assertEqual(fake_pool.total_capacity_gb, 1024)
+ self.assertEqual(fake_pool.free_capacity_gb, 512)
+
+ self.assertDictMatch(fake_pool.capabilities, volume_capability)
from cinder.scheduler.weights.volume_number import VolumeNumberWeigher
from cinder import test
from cinder.tests.scheduler import fakes
+from cinder.volume import utils
CONF = cfg.CONF
def fake_volume_data_get_for_host(context, host, count_only=False):
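+ # Hosts may now carry pool information; normalize to the backend-level
+ # name before matching.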
+ host = utils.extract_host(host)
if host == 'host1':
return 1
elif host == 'host2':
return 3
elif host == 'host4':
return 4
+ elif host == 'host5':
+ return 5
else:
- return 1
+ return 6
class VolumeNumberWeigherTestCase(test.TestCase):
# host2: 2 volumes
# host3: 3 volumes
# host4: 4 volumes
+ # host5: 5 volumes
# so, host1 should win:
with mock.patch.object(api, 'volume_data_get_for_host',
fake_volume_data_get_for_host):
weighed_host = self._get_weighed_host(hostinfo_list)
self.assertEqual(weighed_host.weight, -1.0)
- self.assertEqual(weighed_host.obj.host, 'host1')
+ self.assertEqual(utils.extract_host(weighed_host.obj.host),
+ 'host1')
def test_volume_number_weight_multiplier2(self):
self.flags(volume_number_multiplier=1.0)
# host2: 2 volumes
# host3: 3 volumes
# host4: 4 volumes
- # so, host4 should win:
+ # host5: 5 volumes
+ # so, host5 should win:
with mock.patch.object(api, 'volume_data_get_for_host',
fake_volume_data_get_for_host):
weighed_host = self._get_weighed_host(hostinfo_list)
- self.assertEqual(weighed_host.weight, 4.0)
- self.assertEqual(weighed_host.obj.host, 'host4')
+ self.assertEqual(weighed_host.weight, 5.0)
+ self.assertEqual(utils.extract_host(weighed_host.obj.host),
+ 'host5')
db.volume_get_all_by_host(
self.ctxt, 'h%d' % i))
+ def test_volume_get_all_by_host_with_pools(self):
+ volumes = []
+ vol_on_host_wo_pool = [db.volume_create(self.ctxt, {'host': 'foo'})
+ for j in xrange(3)]
+ vol_on_host_w_pool = [db.volume_create(
+ self.ctxt, {'host': 'foo#pool0'})]
+ volumes.append((vol_on_host_wo_pool +
+ vol_on_host_w_pool))
+ # insert an additional record that doesn't belong to the same
+ # host as 'foo' and make sure it is not included in the result
+ db.volume_create(self.ctxt, {'host': 'foobar'})
+ self._assertEqualListsOfObjects(volumes[0],
+ db.volume_get_all_by_host(
+ self.ctxt, 'foo'))
+
def test_volume_get_all_by_project(self):
volumes = []
for i in xrange(3):
vol['size'] = size
vol['status'] = 'available'
vol['volume_type_id'] = self.volume_type['id']
+ vol['host'] = 'fake_host'
return db.volume_create(self.context, vol)
def _create_snapshot(self, volume):
snapshot['project_id'] = self.project_id
snapshot['volume_id'] = volume['id']
snapshot['volume_size'] = volume['size']
+ snapshot['host'] = volume['host']
snapshot['status'] = 'available'
return db.snapshot_create(self.context, snapshot)
# image.fake has been converted to mock.
fake_image.stub_out_image_service(self.stubs)
self.volume.driver.set_initialized()
- self.volume.stats = {'allocated_capacity_gb': 0}
+ self.volume.stats = {'allocated_capacity_gb': 0,
+ 'pools': {}}
self.called = []
def _create_volume_from_image(self, expected_status, raw=False,
self.stubs.Set(brick_lvm.LVM, '_vg_exists', lambda x: True)
self.stubs.Set(os.path, 'exists', lambda x: True)
self.volume.driver.set_initialized()
- self.volume.stats = {'allocated_capacity_gb': 0}
+ self.volume.stats = {'allocated_capacity_gb': 0,
+ 'pools': {}}
# keep ordered record of what we execute
self.called = []
self.assertRaises(exception.VolumeNotFound, db.volume_get,
context.get_admin_context(), volume_id)
+ def test_init_host_count_allocated_capacity(self):
+ vol0 = tests_utils.create_volume(
+ self.context, size=100, host=CONF.host)
+ vol1 = tests_utils.create_volume(
+ self.context, size=128,
+ host=volutils.append_host(CONF.host, 'pool0'))
+ vol2 = tests_utils.create_volume(
+ self.context, size=256,
+ host=volutils.append_host(CONF.host, 'pool0'))
+ vol3 = tests_utils.create_volume(
+ self.context, size=512,
+ host=volutils.append_host(CONF.host, 'pool1'))
+ vol4 = tests_utils.create_volume(
+ self.context, size=1024,
+ host=volutils.append_host(CONF.host, 'pool2'))
+ self.volume.init_host()
+ stats = self.volume.stats
+ self.assertEqual(stats['allocated_capacity_gb'], 2020)
+ self.assertEqual(
+ stats['pools']['pool0']['allocated_capacity_gb'], 384)
+ self.assertEqual(
+ stats['pools']['pool1']['allocated_capacity_gb'], 512)
+ self.assertEqual(
+ stats['pools']['pool2']['allocated_capacity_gb'], 1024)
+
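+ # vol0 had no pool info in its host; init_host should have rewritten
+ # its host to include the pool reported by the driver.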
+ vol0 = db.volume_get(context.get_admin_context(), vol0['id'])
+ self.assertEqual(vol0['host'],
+ volutils.append_host(CONF.host, 'LVM_iSCSI'))
+ self.volume.delete_volume(self.context, vol0['id'])
+ self.volume.delete_volume(self.context, vol1['id'])
+ self.volume.delete_volume(self.context, vol2['id'])
+ self.volume.delete_volume(self.context, vol3['id'])
+ self.volume.delete_volume(self.context, vol4['id'])
+
@mock.patch.object(QUOTAS, 'reserve')
@mock.patch.object(QUOTAS, 'commit')
@mock.patch.object(QUOTAS, 'rollback')
with mock.patch.object(jsonutils, 'loads') as mock_loads:
mock_loads.return_value = fake_capabilities
manager = VolumeManager()
+ manager.stats = {'pools': {}}
manager.driver.set_initialized()
manager.publish_service_capabilities(self.context)
self.assertTrue(mock_loads.called)
'name',
'description',
volume_type=db_vol_type)
+
+ volume_src['host'] = 'fake_host'
snapshot_ref = volume_api.create_snapshot_force(self.context,
volume_src,
'name',
stats = self.volume.driver._stats
- self.assertEqual(stats['total_capacity_gb'], float('5.52'))
- self.assertEqual(stats['free_capacity_gb'], float('0.52'))
+ self.assertEqual(
+ stats['pools'][0]['total_capacity_gb'], float('5.52'))
+ self.assertEqual(
+ stats['pools'][0]['free_capacity_gb'], float('0.52'))
def test_validate_connector(self):
iscsi_driver = self.base_driver(configuration=self.configuration)
iscsi_driver.validate_connector, connector)
-class ISERTestCase(ISCSITestCase):
+class ISERTestCase(DriverTestCase):
"""Test Case for ISERDriver."""
driver_name = "cinder.volume.drivers.lvm.LVMISERDriver"
base_driver = driver.ISERDriver
def setUp(self):
super(ISERTestCase, self).setUp()
- self.configuration = mox.MockObject(conf.Configuration)
+ self.configuration = mock.Mock(conf.Configuration)
+ self.configuration.safe_get.return_value = None
self.configuration.num_iser_scan_tries = 3
self.configuration.iser_num_targets = 100
self.configuration.iser_target_prefix = 'iqn.2010-10.org.openstack:'
stats = self.volume.driver.get_volume_stats(refresh=True)
- self.assertEqual(stats['total_capacity_gb'], float('5.52'))
- self.assertEqual(stats['free_capacity_gb'], float('0.52'))
+ self.assertEqual(
+ stats['pools'][0]['total_capacity_gb'], float('5.52'))
+ self.assertEqual(
+ stats['pools'][0]['free_capacity_gb'], float('0.52'))
self.assertEqual(stats['storage_protocol'], 'iSER')
def test_get_volume_stats2(self):
stats = iser_driver.get_volume_stats(refresh=True)
- self.assertEqual(stats['total_capacity_gb'], 'infinite')
- self.assertEqual(stats['free_capacity_gb'], 'infinite')
+ self.assertEqual(
+ stats['pools'][0]['total_capacity_gb'], 0)
+ self.assertEqual(
+ stats['pools'][0]['free_capacity_gb'], 0)
self.assertEqual(stats['storage_protocol'], 'iSER')
if c in 'abcdefghijklmnopqrstuvwxyz'])
self.assertTrue([c for c in password
if c in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'])
+
+ def test_extract_host(self):
+ host = 'Host'
+ # default level is 'backend'
+ self.assertEqual(
+ volume_utils.extract_host(host), 'Host')
+ self.assertEqual(
+ volume_utils.extract_host(host, 'host'), 'Host')
+ self.assertEqual(
+ volume_utils.extract_host(host, 'backend'), 'Host')
+ # default_pool_name doesn't work for level other than 'pool'
+ self.assertEqual(
+ volume_utils.extract_host(host, 'host', True), 'Host')
+ self.assertEqual(
+ volume_utils.extract_host(host, 'host', False), 'Host')
+ self.assertEqual(
+ volume_utils.extract_host(host, 'backend', True), 'Host')
+ self.assertEqual(
+ volume_utils.extract_host(host, 'backend', False), 'Host')
+ self.assertEqual(
+ volume_utils.extract_host(host, 'pool'), None)
+ self.assertEqual(
+ volume_utils.extract_host(host, 'pool', True), '_pool0')
+
+ host = 'Host@Backend'
+ self.assertEqual(
+ volume_utils.extract_host(host), 'Host@Backend')
+ self.assertEqual(
+ volume_utils.extract_host(host, 'host'), 'Host')
+ self.assertEqual(
+ volume_utils.extract_host(host, 'backend'), 'Host@Backend')
+ self.assertEqual(
+ volume_utils.extract_host(host, 'pool'), None)
+ self.assertEqual(
+ volume_utils.extract_host(host, 'pool', True), '_pool0')
+
+ host = 'Host@Backend#Pool'
+ self.assertEqual(
+ volume_utils.extract_host(host), 'Host@Backend')
+ self.assertEqual(
+ volume_utils.extract_host(host, 'host'), 'Host')
+ self.assertEqual(
+ volume_utils.extract_host(host, 'backend'), 'Host@Backend')
+ self.assertEqual(
+ volume_utils.extract_host(host, 'pool'), 'Pool')
+ self.assertEqual(
+ volume_utils.extract_host(host, 'pool', True), 'Pool')
+
+ host = 'Host#Pool'
+ self.assertEqual(
+ volume_utils.extract_host(host), 'Host')
+ self.assertEqual(
+ volume_utils.extract_host(host, 'host'), 'Host')
+ self.assertEqual(
+ volume_utils.extract_host(host, 'backend'), 'Host')
+ self.assertEqual(
+ volume_utils.extract_host(host, 'pool'), 'Pool')
+ self.assertEqual(
+ volume_utils.extract_host(host, 'pool', True), 'Pool')
+
+ def test_append_host(self):
+ host = 'Host'
+ pool = 'Pool'
+ expected = 'Host#Pool'
+ self.assertEqual(expected,
+ volume_utils.append_host(host, pool))
+
+ pool = None
+ expected = 'Host'
+ self.assertEqual(expected,
+ volume_utils.append_host(host, pool))
+
+ host = None
+ pool = 'pool'
+ expected = None
+ self.assertEqual(expected,
+ volume_utils.append_host(host, pool))
+
+ host = None
+ pool = None
+ expected = None
+ self.assertEqual(expected,
+ volume_utils.append_host(host, pool))
disabled=False)
found = False
for service in services:
- if utils.service_is_up(service) and service['host'] == host:
+ svc_host = volume_utils.extract_host(host, 'backend')
+ if utils.service_is_up(service) and service['host'] == svc_host:
found = True
if not found:
msg = (_('No available service named %s') % host)
if availability_zone is None:
elevated = context.elevated()
try:
+ svc_host = volume_utils.extract_host(host, 'backend')
service = self.db.service_get_by_host_and_topic(
- elevated, host, CONF.volume_topic)
+ elevated, svc_host, CONF.volume_topic)
except exception.ServiceNotFound:
with excutils.save_and_reraise_exception():
LOG.error(_('Unable to find service for given host.'))
self.set_execute(execute)
self._stats = {}
+ self.pools = []
+
# set True by manager after successful check_for_setup
self._initialized = False
"""Deletes a cgsnapshot."""
raise NotImplementedError()
+ def get_pool(self, volume):
+ """Return pool name where volume reside on.
+
+ :param volume: The volume hosted by the the driver.
+ :return: name of the pool where given volume is in.
+ """
+ return None
+
class ISCSIDriver(VolumeDriver):
"""Executes commands relating to ISCSI volumes.
data["vendor_name"] = 'Open Source'
data["driver_version"] = '1.0'
data["storage_protocol"] = 'iSCSI'
-
- data['total_capacity_gb'] = 'infinite'
- data['free_capacity_gb'] = 'infinite'
- data['reserved_percentage'] = 100
- data['QoS_support'] = False
+ data["pools"] = []
+
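+ # Build per-pool entries with conservative placeholder stats
+ # (0 capacity, 100% reserved); drivers report real values in their
+ # own stats updates.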
+ if self.pools:
+ for pool in self.pools:
+ new_pool = {}
+ new_pool.update(dict(
+ pool_name=pool,
+ total_capacity_gb=0,
+ free_capacity_gb=0,
+ reserved_percentage=100,
+ QoS_support=False
+ ))
+ data["pools"].append(new_pool)
+ else:
+ # No pool configured; the whole backend will be treated as a pool
+ single_pool = {}
+ single_pool.update(dict(
+ pool_name=data["volume_backend_name"],
+ total_capacity_gb=0,
+ free_capacity_gb=0,
+ reserved_percentage=100,
+ QoS_support=False
+ ))
+ data["pools"].append(single_pool)
self._stats = data
def get_target_helper(self, db):
data["vendor_name"] = 'Open Source'
data["driver_version"] = '1.0'
data["storage_protocol"] = 'iSER'
-
- data['total_capacity_gb'] = 'infinite'
- data['free_capacity_gb'] = 'infinite'
- data['reserved_percentage'] = 100
- data['QoS_support'] = False
+ data["pools"] = []
+
+ if self.pools:
+ for pool in self.pools:
+ new_pool = {}
+ new_pool.update(dict(
+ pool_name=pool,
+ total_capacity_gb=0,
+ free_capacity_gb=0,
+ reserved_percentage=100,
+ QoS_support=False
+ ))
+ data["pools"].append(new_pool)
+ else:
+ # No pool configured; the whole backend will be treated as a pool
+ single_pool = {}
+ single_pool.update(dict(
+ pool_name=data["volume_backend_name"],
+ total_capacity_gb=0,
+ free_capacity_gb=0,
+ reserved_percentage=100,
+ QoS_support=False
+ ))
+ data["pools"].append(single_pool)
self._stats = data
def get_target_helper(self, db):
data["vendor_name"] = 'Open Source'
data["driver_version"] = self.VERSION
data["storage_protocol"] = self.protocol
+ data["pools"] = []
+ total_capacity = 0
+ free_capacity = 0
if self.configuration.lvm_mirrors > 0:
- data['total_capacity_gb'] =\
+ total_capacity = \
self.vg.vg_mirror_size(self.configuration.lvm_mirrors)
- data['free_capacity_gb'] =\
+ free_capacity = \
self.vg.vg_mirror_free_space(self.configuration.lvm_mirrors)
elif self.configuration.lvm_type == 'thin':
- data['total_capacity_gb'] = self.vg.vg_thin_pool_size
- data['free_capacity_gb'] = self.vg.vg_thin_pool_free_space
+ total_capacity = self.vg.vg_thin_pool_size
+ free_capacity = self.vg.vg_thin_pool_free_space
else:
- data['total_capacity_gb'] = self.vg.vg_size
- data['free_capacity_gb'] = self.vg.vg_free_space
- data['reserved_percentage'] = self.configuration.reserved_percentage
- data['QoS_support'] = False
- data['location_info'] =\
+ total_capacity = self.vg.vg_size
+ free_capacity = self.vg.vg_free_space
+
+ location_info = \
('LVMVolumeDriver:%(hostname)s:%(vg)s'
':%(lvm_type)s:%(lvm_mirrors)s' %
{'hostname': self.hostname,
'lvm_type': self.configuration.lvm_type,
'lvm_mirrors': self.configuration.lvm_mirrors})
+ # Skip enabled_pools setting, treat the whole backend as one pool
+ # XXX FIXME if multipool support is added to LVM driver.
+ single_pool = {}
+ single_pool.update(dict(
+ pool_name=data["volume_backend_name"],
+ total_capacity_gb=total_capacity,
+ free_capacity_gb=free_capacity,
+ reserved_percentage=self.configuration.reserved_percentage,
+ location_info=location_info,
+ QoS_support=False,
+ ))
+ data["pools"].append(single_pool)
+
self._stats = data
def extend_volume(self, volume, new_size):
data=exception_message)
return lv_size
+ def get_pool(self, volume):
+ return self.backend_name
+
class LVMISCSIDriver(LVMVolumeDriver, driver.ISCSIDriver):
"""Executes commands relating to ISCSI volumes.
# the volume can not be deleted while its still being created?
volume_ref = self.db.volume_get(context, volume_id)
- # NOTE(vish): so we don't have to get volume from db again before
- # passing it to the driver.
- volume_ref['host'] = self.host
-
return volume_ref
def revert(self, context, volume_id, result, **kwargs):
from cinder.volume.flows.manager import create_volume
from cinder.volume.flows.manager import manage_existing
from cinder.volume import rpcapi as volume_rpcapi
-from cinder.volume import utils as volume_utils
+from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
from eventlet.greenpool import GreenPool
def _add_to_threadpool(self, func, *args, **kwargs):
self._tp.spawn_n(func, *args, **kwargs)
+ def _count_allocated_capacity(self, ctxt, volume):
+ pool = vol_utils.extract_host(volume['host'], 'pool')
+ if pool is None:
+ # No pool name encoded in host, so this is a legacy
+ # volume created before pools were introduced. Ask the
+ # driver to provide pool info if it has such
+ # knowledge, and update the DB.
+ try:
+ pool = self.driver.get_pool(volume)
+ except Exception as err:
+ LOG.error(_('Failed to fetch pool name for volume: %s'),
+ volume['id'])
+ LOG.exception(err)
+ return
+
+ if pool:
+ new_host = vol_utils.append_host(volume['host'],
+ pool)
+ self.db.volume_update(ctxt, volume['id'],
+ {'host': new_host})
+ else:
+ # Otherwise, put the volume into a special fixed pool
+ # named after volume_backend_name; if volume_backend_name
+ # is None, use the default pool name. This is only for
+ # counting purposes and doesn't update the DB.
+ pool = (self.driver.configuration.safe_get(
+ 'volume_backend_name') or vol_utils.extract_host(
+ volume['host'], 'pool', True))
+ try:
+ pool_stat = self.stats['pools'][pool]
+ except KeyError:
+ # First volume in the pool
+ self.stats['pools'][pool] = dict(
+ allocated_capacity_gb=0)
+ pool_stat = self.stats['pools'][pool]
+ pool_sum = pool_stat['allocated_capacity_gb']
+ pool_sum += volume['size']
+
+ self.stats['pools'][pool]['allocated_capacity_gb'] = pool_sum
+ self.stats['allocated_capacity_gb'] += volume['size']
+
def init_host(self):
"""Do any initialization that needs to be run if this is a
standalone service.
return
volumes = self.db.volume_get_all_by_host(ctxt, self.host)
- LOG.debug("Re-exporting %s volumes", len(volumes))
+ # FIXME volume count for exporting is wrong
+ LOG.debug("Re-exporting %s volumes" % len(volumes))
try:
- sum = 0
- self.stats.update({'allocated_capacity_gb': sum})
+ self.stats['pools'] = {}
+ self.stats.update({'allocated_capacity_gb': 0})
for volume in volumes:
- if volume['status'] in ['in-use']:
+ # available volumes should also be counted as allocated
+ if volume['status'] in ['in-use', 'available']:
# calculate allocated capacity for driver
- sum += volume['size']
- self.stats['allocated_capacity_gb'] = sum
+ self._count_allocated_capacity(ctxt, volume)
+
try:
self.driver.ensure_export(ctxt, volume)
except Exception as export_ex:
_run_flow_locked()
# Fetch created volume from storage
- volume_ref = flow_engine.storage.fetch('volume')
+ vol_ref = flow_engine.storage.fetch('volume')
# Update volume stats
- self.stats['allocated_capacity_gb'] += volume_ref['size']
- return volume_ref['id']
+ pool = vol_utils.extract_host(vol_ref['host'], 'pool')
+ if pool is None:
+ # Legacy volume, put it into the default pool
+ pool = self.driver.configuration.safe_get(
+ 'volume_backend_name') or vol_utils.extract_host(
+ vol_ref['host'], 'pool', True)
+
+ try:
+ self.stats['pools'][pool]['allocated_capacity_gb'] \
+ += vol_ref['size']
+ except KeyError:
+ self.stats['pools'][pool] = dict(
+ allocated_capacity_gb=vol_ref['size'])
+
+ return vol_ref['id']
@locked_volume_operation
def delete_volume(self, context, volume_id, unmanage_only=False):
if volume_ref['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_id)
- if volume_ref['host'] != self.host:
+ if (vol_utils.extract_host(volume_ref['host']) != self.host):
raise exception.InvalidVolume(
reason=_("volume is not local to this node"))
if reservations:
QUOTAS.commit(context, reservations, project_id=project_id)
- self.stats['allocated_capacity_gb'] -= volume_ref['size']
+ pool = vol_utils.extract_host(volume_ref['host'], 'pool')
+ if pool is None:
+ # Legacy volume, put it into the default pool
+ pool = self.driver.configuration.safe_get(
+ 'volume_backend_name') or vol_utils.extract_host(
+ volume_ref['host'], 'pool', True)
+ size = volume_ref['size']
+
+ try:
+ self.stats['pools'][pool]['allocated_capacity_gb'] -= size
+ except KeyError:
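+ # Pool not tracked yet; record the freed size as a negative
+ # allocation so the running total stays consistent.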
+ self.stats['pools'][pool] = dict(
+ allocated_capacity_gb=-size)
+
self.publish_service_capabilities(context)
return True
volume_stats.update(self.extra_capabilities)
if volume_stats:
# Append volume stats with 'allocated_capacity_gb'
- volume_stats.update(self.stats)
+ self._append_volume_stats(volume_stats)
+
# queue it to be sent to the Schedulers.
self.update_service_capabilities(volume_stats)
+ def _append_volume_stats(self, vol_stats):
+ pools = vol_stats.get('pools', None)
+ if pools and isinstance(pools, list):
+ for pool in pools:
+ pool_name = pool['pool_name']
+ try:
+ pool_stats = self.stats['pools'][pool_name]
+ except KeyError:
+ # Pool not found in volume manager
+ pool_stats = dict(allocated_capacity_gb=0)
+
+ pool.update(pool_stats)
+
def publish_service_capabilities(self, context):
"""Collect driver status and then publish."""
self._report_driver_status(context)
volume,
event_suffix,
extra_usage_info=None):
- volume_utils.notify_about_volume_usage(
+ vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
snapshot,
event_suffix,
extra_usage_info=None):
- volume_utils.notify_about_snapshot_usage(
+ vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
group,
event_suffix,
extra_usage_info=None):
- volume_utils.notify_about_consistencygroup_usage(
+ vol_utils.notify_about_consistencygroup_usage(
context, group, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
volumes = self.db.volume_get_all_by_group(context, group['id'])
if volumes:
for volume in volumes:
- volume_utils.notify_about_volume_usage(
+ vol_utils.notify_about_volume_usage(
context, volume, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
cgsnapshot,
event_suffix,
extra_usage_info=None):
- volume_utils.notify_about_cgsnapshot_usage(
+ vol_utils.notify_about_cgsnapshot_usage(
context, cgsnapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
cgsnapshot['id'])
if snapshots:
for snapshot in snapshots:
- volume_utils.notify_about_snapshot_usage(
+ vol_utils.notify_about_snapshot_usage(
context, snapshot, event_suffix,
extra_usage_info=extra_usage_info, host=self.host)
volume['id'],
{'size': int(new_size),
'status': 'available'})
- self.stats['allocated_capacity_gb'] += size_increase
+ pool = vol_utils.extract_host(volume['host'], 'pool')
+ if pool is None:
+ # Legacy volume, put it into the default pool
+ pool = self.driver.configuration.safe_get(
+ 'volume_backend_name') or vol_utils.extract_host(
+ volume['host'], 'pool', True)
+
+ try:
+ self.stats['pools'][pool]['allocated_capacity_gb'] += size_increase
+ except KeyError:
+ self.stats['pools'][pool] = dict(
+ allocated_capacity_gb=size_increase)
self._notify_about_volume_usage(
context, volume, "resize.end",
flow_engine.run()
# Fetch created volume from storage
- volume_ref = flow_engine.storage.fetch('volume')
+ vol_ref = flow_engine.storage.fetch('volume')
# Update volume stats
- self.stats['allocated_capacity_gb'] += volume_ref['size']
- return volume_ref['id']
+ pool = vol_utils.extract_host(vol_ref['host'], 'pool')
+ if pool is None:
+ # Legacy volume, put it into the default pool
+ pool = self.driver.configuration.safe_get(
+ 'volume_backend_name') or vol_utils.extract_host(
+ vol_ref['host'], 'pool', True)
+
+ try:
+ self.stats['pools'][pool]['allocated_capacity_gb'] \
+ += vol_ref['size']
+ except KeyError:
+ self.stats['pools'][pool] = dict(
+ allocated_capacity_gb=vol_ref['size'])
+
+ return vol_ref['id']
def promote_replica(self, ctxt, volume_id):
"""Promote volume replica secondary to be the primary volume."""
from cinder.openstack.common import jsonutils
from cinder import rpc
+from cinder.volume import utils
CONF = cfg.CONF
self.client = rpc.get_client(target, '1.18')
def create_consistencygroup(self, ctxt, group, host):
- cctxt = self.client.prepare(server=host, version='1.18')
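+ # The host string may include pool information, but RPC servers are
+ # addressed by the backend-level name only, so strip any pool suffix.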
+ new_host = utils.extract_host(host)
+ cctxt = self.client.prepare(server=new_host, version='1.18')
cctxt.cast(ctxt, 'create_consistencygroup',
group_id=group['id'])
def delete_consistencygroup(self, ctxt, group):
- cctxt = self.client.prepare(server=group['host'], version='1.18')
+ host = utils.extract_host(group['host'])
+ cctxt = self.client.prepare(server=host, version='1.18')
cctxt.cast(ctxt, 'delete_consistencygroup',
group_id=group['id'])
def create_cgsnapshot(self, ctxt, group, cgsnapshot):
- cctxt = self.client.prepare(server=group['host'], version='1.18')
+ host = utils.extract_host(group['host'])
+ cctxt = self.client.prepare(server=host, version='1.18')
cctxt.cast(ctxt, 'create_cgsnapshot',
group_id=group['id'],
cgsnapshot_id=cgsnapshot['id'])
def delete_cgsnapshot(self, ctxt, cgsnapshot, host):
- cctxt = self.client.prepare(server=host, version='1.18')
+ new_host = utils.extract_host(host)
+ cctxt = self.client.prepare(server=new_host, version='1.18')
cctxt.cast(ctxt, 'delete_cgsnapshot',
cgsnapshot_id=cgsnapshot['id'])
source_volid=None,
consistencygroup_id=None):
- cctxt = self.client.prepare(server=host, version='1.4')
+ new_host = utils.extract_host(host)
+ cctxt = self.client.prepare(server=new_host, version='1.4')
request_spec_p = jsonutils.to_primitive(request_spec)
cctxt.cast(ctxt, 'create_volume',
volume_id=volume['id'],
consistencygroup_id=consistencygroup_id)
def delete_volume(self, ctxt, volume, unmanage_only=False):
- cctxt = self.client.prepare(server=volume['host'], version='1.15')
+ new_host = utils.extract_host(volume['host'])
+ cctxt = self.client.prepare(server=new_host, version='1.15')
cctxt.cast(ctxt, 'delete_volume',
volume_id=volume['id'],
unmanage_only=unmanage_only)
def create_snapshot(self, ctxt, volume, snapshot):
- cctxt = self.client.prepare(server=volume['host'])
+ new_host = utils.extract_host(volume['host'])
+ cctxt = self.client.prepare(server=new_host)
cctxt.cast(ctxt, 'create_snapshot', volume_id=volume['id'],
snapshot_id=snapshot['id'])
def delete_snapshot(self, ctxt, snapshot, host):
- cctxt = self.client.prepare(server=host)
+ new_host = utils.extract_host(host)
+ cctxt = self.client.prepare(server=new_host)
cctxt.cast(ctxt, 'delete_snapshot', snapshot_id=snapshot['id'])
def attach_volume(self, ctxt, volume, instance_uuid, host_name,
mountpoint, mode):
- cctxt = self.client.prepare(server=volume['host'], version='1.11')
+ new_host = utils.extract_host(volume['host'])
+ cctxt = self.client.prepare(server=new_host, version='1.11')
return cctxt.call(ctxt, 'attach_volume',
volume_id=volume['id'],
instance_uuid=instance_uuid,
mode=mode)
def detach_volume(self, ctxt, volume):
- cctxt = self.client.prepare(server=volume['host'])
+ new_host = utils.extract_host(volume['host'])
+ cctxt = self.client.prepare(server=new_host)
return cctxt.call(ctxt, 'detach_volume', volume_id=volume['id'])
def copy_volume_to_image(self, ctxt, volume, image_meta):
- cctxt = self.client.prepare(server=volume['host'], version='1.3')
+ new_host = utils.extract_host(volume['host'])
+ cctxt = self.client.prepare(server=new_host, version='1.3')
cctxt.cast(ctxt, 'copy_volume_to_image', volume_id=volume['id'],
image_meta=image_meta)
def initialize_connection(self, ctxt, volume, connector):
- cctxt = self.client.prepare(server=volume['host'])
+ new_host = utils.extract_host(volume['host'])
+ cctxt = self.client.prepare(server=new_host)
return cctxt.call(ctxt, 'initialize_connection',
volume_id=volume['id'],
connector=connector)
def terminate_connection(self, ctxt, volume, connector, force=False):
- cctxt = self.client.prepare(server=volume['host'])
+ new_host = utils.extract_host(volume['host'])
+ cctxt = self.client.prepare(server=new_host)
return cctxt.call(ctxt, 'terminate_connection', volume_id=volume['id'],
connector=connector, force=force)
cctxt.cast(ctxt, 'publish_service_capabilities')
def accept_transfer(self, ctxt, volume, new_user, new_project):
- cctxt = self.client.prepare(server=volume['host'], version='1.9')
+ new_host = utils.extract_host(volume['host'])
+ cctxt = self.client.prepare(server=new_host, version='1.9')
cctxt.cast(ctxt, 'accept_transfer', volume_id=volume['id'],
new_user=new_user, new_project=new_project)
def extend_volume(self, ctxt, volume, new_size, reservations):
- cctxt = self.client.prepare(server=volume['host'], version='1.14')
+ new_host = utils.extract_host(volume['host'])
+ cctxt = self.client.prepare(server=new_host, version='1.14')
cctxt.cast(ctxt, 'extend_volume', volume_id=volume['id'],
new_size=new_size, reservations=reservations)
def migrate_volume(self, ctxt, volume, dest_host, force_host_copy):
- cctxt = self.client.prepare(server=volume['host'], version='1.8')
+ new_host = utils.extract_host(volume['host'])
+ cctxt = self.client.prepare(server=new_host, version='1.8')
host_p = {'host': dest_host.host,
'capabilities': dest_host.capabilities}
cctxt.cast(ctxt, 'migrate_volume', volume_id=volume['id'],
host=host_p, force_host_copy=force_host_copy)
def migrate_volume_completion(self, ctxt, volume, new_volume, error):
- cctxt = self.client.prepare(server=volume['host'], version='1.10')
+ new_host = utils.extract_host(volume['host'])
+ cctxt = self.client.prepare(server=new_host, version='1.10')
return cctxt.call(ctxt, 'migrate_volume_completion',
volume_id=volume['id'],
new_volume_id=new_volume['id'],
def retype(self, ctxt, volume, new_type_id, dest_host,
migration_policy='never', reservations=None):
- cctxt = self.client.prepare(server=volume['host'], version='1.12')
+ new_host = utils.extract_host(volume['host'])
+ cctxt = self.client.prepare(server=new_host, version='1.12')
host_p = {'host': dest_host.host,
'capabilities': dest_host.capabilities}
cctxt.cast(ctxt, 'retype', volume_id=volume['id'],
reservations=reservations)
def manage_existing(self, ctxt, volume, ref):
- cctxt = self.client.prepare(server=volume['host'], version='1.15')
+ new_host = utils.extract_host(volume['host'])
+ cctxt = self.client.prepare(server=new_host, version='1.15')
cctxt.cast(ctxt, 'manage_existing', volume_id=volume['id'], ref=ref)
def promote_replica(self, ctxt, volume):
- cctxt = self.client.prepare(server=volume['host'], version='1.17')
+ new_host = utils.extract_host(volume['host'])
+ cctxt = self.client.prepare(server=new_host, version='1.17')
cctxt.cast(ctxt, 'promote_replica', volume_id=volume['id'])
def reenable_replication(self, ctxt, volume):
- cctxt = self.client.prepare(server=volume['host'], version='1.17')
+ new_host = utils.extract_host(volume['host'])
+ cctxt = self.client.prepare(server=new_host, version='1.17')
cctxt.cast(ctxt, 'reenable_replication', volume_id=volume['id'])
def generate_username(length=20, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
# Use the same implementation as the password generation.
return generate_password(length, symbolgroups)
+
+
+DEFAULT_POOL_NAME = '_pool0'
+
+
+def extract_host(host, level='backend', default_pool_name=False):
+ """Extract Host, Backend or Pool information from host string.
+
+ :param host: String for host, which could include host@backend#pool info
+ :param level: Indicates which level of information should be extracted
+ from the host string. Level can be 'host', 'backend' or
+ 'pool'; the default is 'backend'.
+ :param default_pool_name: this flag specifies what to do if level ==
+ 'pool' and there is no pool info encoded in the
+ host string. If True, DEFAULT_POOL_NAME is
+ returned; otherwise None is returned. The
+ default is False.
+ :return: expected level of information
+
+ For example:
+ host = 'HostA@BackendB#PoolC'
+ ret = extract_host(host, 'host')
+ # ret is 'HostA'
+ ret = extract_host(host, 'backend')
+ # ret is 'HostA@BackendB'
+ ret = extract_host(host, 'pool')
+ # ret is 'PoolC'
+
+ host = 'HostX@BackendY'
+ ret = extract_host(host, 'pool')
+ # ret is None
+ ret = extract_host(host, 'pool', True)
+ # ret is '_pool0'
+ """
+ if level == 'host':
+ # make sure pool is not included
+ hst = host.split('#')[0]
+ return hst.split('@')[0]
+ elif level == 'backend':
+ return host.split('#')[0]
+ elif level == 'pool':
+ lst = host.split('#')
+ if len(lst) == 2:
+ return lst[1]
+ elif default_pool_name is True:
+ return DEFAULT_POOL_NAME
+ else:
+ return None
+
+
+def append_host(host, pool):
+ """Encode pool into host info."""
+ if not host or not pool:
+ return host
+
+ new_host = "#".join([host, pool])
+ return new_host