from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import strutils
-from cinder import units
+from cinder.openstack.common import units
from cinder import utils
import cinder.volume.drivers.rbd as rbd_driver
help='The Ceph user to connect with. Default here is to use '
'the same user as for Cinder volumes. If not using cephx '
'this should be set to None.'),
- cfg.IntOpt('backup_ceph_chunk_size', default=(units.MiB * 128),
+ cfg.IntOpt('backup_ceph_chunk_size', default=(units.Mi * 128),
help='The chunk size, in bytes, that a backup is broken into '
'before transfer to the Ceph object store.'),
cfg.StrOpt('backup_ceph_pool', default='backups',
errmsg = _("Need non-zero volume size")
raise exception.InvalidParameterValue(errmsg)
- return int(volume['size']) * units.GiB
+ return int(volume['size']) * units.Gi
def _backup_metadata(self, backup):
"""Backup volume metadata.
volume_name = volume['name']
backup_id = backup['id']
backup_volume_id = backup['volume_id']
- length = int(volume['size']) * units.GiB
+ length = int(volume['size']) * units.Gi
base_name = self._get_backup_base_name(backup['volume_id'],
diff_format=True)
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
-from cinder import units
+from cinder.openstack.common import units
from swiftclient import client as swift
backup['service_metadata'] = object_prefix
self.db.backup_update(self.context, backup_id, {'service_metadata':
object_prefix})
- volume_size_bytes = volume['size'] * units.GiB
+ volume_size_bytes = volume['size'] * units.Gi
availability_zone = self.az
LOG.debug('starting backup of volume: %(volume_id)s to swift,'
' volume size: %(volume_size_bytes)d, swift object names'
from cinder.openstack.common import imageutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
-from cinder import units
+from cinder.openstack.common import units
from cinder import utils
from cinder.volume import utils as volume_utils
return
data = qemu_img_info(tmp)
- virt_size = data.virtual_size / units.GiB
+ virt_size = data.virtual_size / units.Gi
# NOTE(xqueralt): If the image virtual size doesn't fit in the
# requested volume there is no point on resizing it because it will
-# Copyright 2011 OpenStack Foundation
+# Copyright 2013 IBM Corp
+# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+
"""
-A module where we define some basic units for use across Cinder.
+Unit constants.
"""
-KiB = 1024
-MiB = KiB * 1024
-GiB = MiB * 1024
-TiB = GiB * 1024
+# Binary unit constants.
+Ki = 1024
+Mi = 1024 ** 2
+Gi = 1024 ** 3
+Ti = 1024 ** 4
+Pi = 1024 ** 5
+Ei = 1024 ** 6
+Zi = 1024 ** 7
+Yi = 1024 ** 8
+
+# Decimal unit constants.
+k = 1000
+M = 1000 ** 2
+G = 1000 ** 3
+T = 1000 ** 4
+P = 1000 ** 5
+E = 1000 ** 6
+Z = 1000 ** 7
+Y = 1000 ** 8
from cinder.image import image_utils
from cinder.openstack.common import jsonutils
from cinder.openstack.common import log as logging
+from cinder.openstack.common import units
from cinder import test
-from cinder import units
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers import coraid
def to_coraid_kb(gb):
- return math.ceil(float(gb) * units.GiB / 1000)
+ return math.ceil(float(gb) * units.Gi / 1000)
def coraid_volume_size(gb):
from cinder.image import image_utils
from cinder.openstack.common import imageutils
from cinder.openstack.common import processutils as putils
+from cinder.openstack.common import units
from cinder import test
-from cinder import units
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume import driver as base_driver
mox.StubOutWithMock(drv, '_get_available_capacity')
drv._get_available_capacity(self.TEST_EXPORT1).\
- AndReturn((2 * units.GiB, 5 * units.GiB))
+ AndReturn((2 * units.Gi, 5 * units.Gi))
drv._get_available_capacity(self.TEST_EXPORT2).\
- AndReturn((3 * units.GiB, 10 * units.GiB))
+ AndReturn((3 * units.Gi, 10 * units.Gi))
mox.ReplayAll()
mox.StubOutWithMock(drv, '_get_available_capacity')
drv._get_available_capacity(self.TEST_EXPORT1).\
- AndReturn((0, 5 * units.GiB))
+ AndReturn((0, 5 * units.Gi))
drv._get_available_capacity(self.TEST_EXPORT2).\
- AndReturn((0, 10 * units.GiB))
+ AndReturn((0, 10 * units.Gi))
mox.ReplayAll()
drv._execute('qemu-img', 'create', '-f', 'qcow2',
'-o', 'preallocation=metadata', path,
- str(volume['size'] * units.GiB),
+ str(volume['size'] * units.Gi),
run_as_root=True)
drv._execute('chmod', 'ugo+rw', path, run_as_root=True)
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
+from cinder.openstack.common import units
from cinder import test
-from cinder import units
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.ibm import gpfs
mock_rw_permission,
mock_gpfs_redirect,
mock_resize_volume_file):
- mock_resize_volume_file.return_value = 5 * units.GiB
+ mock_resize_volume_file.return_value = 5 * units.Gi
volume = {}
volume['size'] = 1000
self.assertEqual(self.driver.create_volume_from_snapshot(volume, ''),
mock_create_gpfs_clone,
mock_rw_permission,
mock_resize_volume_file):
- mock_resize_volume_file.return_value = 5 * units.GiB
+ mock_resize_volume_file.return_value = 5 * units.Gi
volume = {}
volume['size'] = 1000
self.assertEqual(self.driver.create_cloned_volume(volume, ''),
self.assertEqual(None, self.driver.terminate_connection('', ''))
def test_get_volume_stats(self):
- fake_avail = 80 * units.GiB
+ fake_avail = 80 * units.Gi
fake_size = 2 * fake_avail
with mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
'_get_available_capacity',
data = FakeQemuImgInfo()
data.file_format = 'qcow2'
data.backing_file = None
- data.virtual_size = 1 * units.GiB
+ data.virtual_size = 1 * units.Gi
return data
def _fake_qemu_raw_image_info(self, path):
data = FakeQemuImgInfo()
data.file_format = 'raw'
data.backing_file = None
- data.virtual_size = 1 * units.GiB
+ data.virtual_size = 1 * units.Gi
return data
def _fake_retype_arguments(self):
from cinder import context
from cinder import exception
from cinder.openstack.common import log as logging
+from cinder.openstack.common import units
from cinder import test
-from cinder import units
from cinder.tests import fake_hp_3par_client as hp3parclient
from cinder.volume.drivers.san.hp import hp_3par_fc as hpfcdriver
old_size = self.volume['size']
new_size = old_size + grow_size
self.driver.extend_volume(self.volume, str(new_size))
- growth_size_mib = grow_size * units.KiB
+ growth_size_mib = grow_size * units.Ki
expected = [
mock.call.growVolume(self.VOLUME_3PAR_NAME, growth_size_mib)]
from cinder import context
from cinder import exception
from cinder.openstack.common import log as logging
+from cinder.openstack.common import units
from cinder import test
-from cinder import units
from cinder.tests import fake_hp_lefthand_client as hplefthandclient
from cinder.volume.drivers.san.hp import hp_lefthand_iscsi
_mock_client.return_value.getClusterByName.return_value = {
'id': 1, 'virtualIPAddresses': [{'ipV4Address': '10.0.1.6'}]}
_mock_client.return_value.getCluster.return_value = {
- 'spaceTotal': units.GiB * 500,
- 'spaceAvailable': units.GiB * 250}
+ 'spaceTotal': units.Gi * 500,
+ 'spaceAvailable': units.Gi * 250}
self.driver = hp_lefthand_iscsi.HPLeftHandISCSIDriver(
configuration=config)
self.driver.do_setup(None)
mock.call.createVolume(
'fakevolume',
1,
- units.GiB,
+ units.Gi,
{'isThinProvisioned': True, 'clusterName': 'CloudCluster1'})]
mock_client.assert_has_calls(expected)
mock.call.createVolume(
'fakevolume',
1,
- units.GiB,
+ units.Gi,
{'isThinProvisioned': False, 'clusterName': 'CloudCluster1'})]
mock_client.assert_has_calls(expected)
expected = self.driver_startup_call_stack + [
mock.call.getVolumeByName('fakevolume'),
- mock.call.modifyVolume(1, {'size': 2 * units.GiB})]
+ mock.call.modifyVolume(1, {'size': 2 * units.Gi})]
# validate call chain
mock_client.assert_has_calls(expected)
mock.call.createVolume(
'fakevolume',
1,
- units.GiB,
+ units.Gi,
{'isThinProvisioned': True, 'clusterName': 'CloudCluster1'})]
mock_client.assert_has_calls(expected)
mock.call.createVolume(
'fakevolume',
1,
- units.GiB,
+ units.Gi,
{'isThinProvisioned': True,
'clusterName': 'CloudCluster1',
'isAdaptiveOptimizationEnabled': False})]
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import processutils
+from cinder.openstack.common import units
from cinder import test
-from cinder import units
from cinder import utils
data.write(self._imagedata.get(image_id, ''))
def show(self, context, image_id):
- return {'size': 2 * units.GiB,
+ return {'size': 2 * units.Gi,
'disk_format': 'qcow2',
'container_format': 'bare'}
from cinder import context
from cinder import db
+from cinder.openstack.common import units
from cinder import test
-from cinder import units
from cinder.volume import configuration as conf
from cinder.volume.drivers import nexenta
from cinder.volume.drivers.nexenta import iscsi
self.mox.ReplayAll()
total, free, allocated = self.drv._get_capacity_info(self.TEST_EXPORT1)
- self.assertEqual(total, 3 * units.GiB)
- self.assertEqual(free, units.GiB)
- self.assertEqual(allocated, 2 * units.GiB)
+ self.assertEqual(total, 3 * units.Gi)
+ self.assertEqual(free, units.Gi)
+ self.assertEqual(allocated, 2 * units.Gi)
def test_get_share_datasets(self):
self.drv.share2nms = {self.TEST_EXPORT1: self.nms_mock}
('1023b', 1023),
('0B', 0),
# Test other units
- ('1M', units.MiB),
- ('1.0M', units.MiB),
+ ('1M', units.Mi),
+ ('1.0M', units.Mi),
)
for value, result in values_to_test:
def test_str2gib_size(self):
self.assertEqual(utils.str2gib_size('1024M'), 1)
self.assertEqual(utils.str2gib_size('300M'),
- 300 * units.MiB // units.GiB)
+ 300 * units.Mi // units.Gi)
self.assertEqual(utils.str2gib_size('1.2T'),
- 1.2 * units.TiB // units.GiB)
+ 1.2 * units.Ti // units.Gi)
self.assertRaises(ValueError, utils.str2gib_size, 'A')
def test_parse_nms_url(self):
from cinder import context
from cinder import exception
from cinder.image import image_utils
+from cinder.openstack.common import units
from cinder import test
-from cinder import units
from cinder.volume import configuration as conf
from cinder.volume.drivers import nfs
mox.StubOutWithMock(drv, '_execute')
drv._execute('qemu-img', 'create', '-f', 'qcow2',
'-o', 'preallocation=metadata', '/path',
- '%s' % str(file_size * units.GiB), run_as_root=True)
+ '%s' % str(file_size * units.Gi), run_as_root=True)
mox.ReplayAll()
mox.StubOutWithMock(image_utils, 'qemu_img_info')
data = mox_lib.MockAnything()
- data.virtual_size = 1 * units.GiB
+ data.virtual_size = 1 * units.Gi
image_utils.qemu_img_info(TEST_IMG_SOURCE).AndReturn(data)
mox.ReplayAll()
mox.StubOutWithMock(drv, '_get_capacity_info')
drv._get_capacity_info(self.TEST_NFS_EXPORT1).\
- AndReturn((5 * units.GiB, 2 * units.GiB,
- 2 * units.GiB))
+ AndReturn((5 * units.Gi, 2 * units.Gi,
+ 2 * units.Gi))
drv._get_capacity_info(self.TEST_NFS_EXPORT1).\
- AndReturn((5 * units.GiB, 2 * units.GiB,
- 2 * units.GiB))
+ AndReturn((5 * units.Gi, 2 * units.Gi,
+ 2 * units.Gi))
drv._get_capacity_info(self.TEST_NFS_EXPORT2).\
- AndReturn((10 * units.GiB, 3 * units.GiB,
- 1 * units.GiB))
+ AndReturn((10 * units.Gi, 3 * units.Gi,
+ 1 * units.Gi))
drv._get_capacity_info(self.TEST_NFS_EXPORT2).\
- AndReturn((10 * units.GiB, 3 * units.GiB,
- 1 * units.GiB))
+ AndReturn((10 * units.Gi, 3 * units.Gi,
+ 1 * units.Gi))
mox.ReplayAll()
mox.StubOutWithMock(drv, '_get_capacity_info')
drv._get_capacity_info(self.TEST_NFS_EXPORT1).\
- AndReturn((5 * units.GiB, 0, 5 * units.GiB))
+ AndReturn((5 * units.Gi, 0, 5 * units.Gi))
drv._get_capacity_info(self.TEST_NFS_EXPORT2).\
- AndReturn((10 * units.GiB, 0,
- 10 * units.GiB))
+ AndReturn((10 * units.Gi, 0,
+ 10 * units.Gi))
mox.ReplayAll()
drv._ensure_shares_mounted()
drv._get_capacity_info(self.TEST_NFS_EXPORT1).\
- AndReturn((10 * units.GiB, 2 * units.GiB,
- 2 * units.GiB))
+ AndReturn((10 * units.Gi, 2 * units.Gi,
+ 2 * units.Gi))
drv._get_capacity_info(self.TEST_NFS_EXPORT2).\
- AndReturn((20 * units.GiB, 3 * units.GiB,
- 3 * units.GiB))
+ AndReturn((20 * units.Gi, 3 * units.Gi,
+ 3 * units.Gi))
mox.ReplayAll()
requested_volume_size)
def test_is_share_eligible(self):
- total_size = 100.0 * units.GiB
- total_available = 90.0 * units.GiB
- total_allocated = 10.0 * units.GiB
+ total_size = 100.0 * units.Gi
+ total_available = 90.0 * units.Gi
+ total_allocated = 10.0 * units.Gi
requested_volume_size = 1 # GiB
self.assertTrue(self._check_is_share_eligible(total_size,
requested_volume_size))
def test_is_share_eligible_above_used_ratio(self):
- total_size = 100.0 * units.GiB
- total_available = 4.0 * units.GiB
- total_allocated = 96.0 * units.GiB
+ total_size = 100.0 * units.Gi
+ total_available = 4.0 * units.Gi
+ total_allocated = 96.0 * units.Gi
requested_volume_size = 1 # GiB
# Check used > used_ratio statement entered
requested_volume_size))
def test_is_share_eligible_above_oversub_ratio(self):
- total_size = 100.0 * units.GiB
- total_available = 10.0 * units.GiB
- total_allocated = 90.0 * units.GiB
+ total_size = 100.0 * units.Gi
+ total_available = 10.0 * units.Gi
+ total_allocated = 90.0 * units.Gi
requested_volume_size = 10 # GiB
# Check apparent_available <= requested_volume_size statement entered
requested_volume_size))
def test_is_share_eligible_reserved_space_above_oversub_ratio(self):
- total_size = 100.0 * units.GiB
- total_available = 10.0 * units.GiB
- total_allocated = 100.0 * units.GiB
+ total_size = 100.0 * units.Gi
+ total_available = 10.0 * units.Gi
+ total_allocated = 100.0 * units.Gi
requested_volume_size = 1 # GiB
# Check total_allocated / total_size >= oversub_ratio
path = 'fake/path'
size = 2
data = mock.MagicMock()
- data.virtual_size = size * units.GiB
+ data.virtual_size = size * units.Gi
with mock.patch.object(image_utils, 'qemu_img_info',
return_value=data):
path = 'fake/path'
size = 2
data = mock.MagicMock()
- data.virtual_size = (size + 1) * units.GiB
+ data.virtual_size = (size + 1) * units.Gi
with mock.patch.object(image_utils, 'qemu_img_info',
return_value=data):
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
+from cinder.openstack.common import units
from cinder import test
from cinder.tests.image import fake as fake_image
from cinder.tests.test_volume import DriverTestCase
-from cinder import units
from cinder.volume import configuration as conf
import cinder.volume.drivers.rbd as driver
from cinder.volume.flows.manager import create_volume
self.driver.create_volume(self.volume)
- chunk_size = self.cfg.rbd_store_chunk_size * units.MiB
+ chunk_size = self.cfg.rbd_store_chunk_size * units.Mi
order = int(math.log(chunk_size, 2))
args = [client.ioctx, str(self.volume_name),
- self.volume_size * units.GiB, order]
+ self.volume_size * units.Gi, order]
kwargs = {'old_format': False,
'features': self.mock_rbd.RBD_FEATURE_LAYERING}
self.mock_rbd.RBD.create.assert_called_once_with(*args, **kwargs)
self.driver.create_volume(self.volume)
- chunk_size = self.cfg.rbd_store_chunk_size * units.MiB
+ chunk_size = self.cfg.rbd_store_chunk_size * units.Mi
order = int(math.log(chunk_size, 2))
args = [client.ioctx, str(self.volume_name),
- self.volume_size * units.GiB, order]
+ self.volume_size * units.Gi, order]
kwargs = {'old_format': True,
'features': 0}
self.mock_rbd.RBD.create.assert_called_once_with(*args, **kwargs)
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
self.mox.StubOutWithMock(self.driver, '_resize')
- size = int(fake_size) * units.GiB
+ size = int(fake_size) * units.Gi
self.driver._resize(fake_vol, size=size)
self.mox.ReplayAll()
from cinder import context
from cinder import exception
from cinder.image import image_utils
+from cinder.openstack.common import units
from cinder import test
-from cinder import units
from cinder import utils
from cinder.volume.drivers import scality
self.TEST_VOLNAME))
self.assertTrue(os.path.isfile(self.TEST_VOLPATH))
self.assertEqual(os.stat(self.TEST_VOLPATH).st_size,
- 100 * units.MiB)
+ 100 * units.Mi)
def test_delete_volume(self):
"""Expected behaviour for delete_volume."""
from cinder.image import image_utils
from cinder.openstack.common import processutils
+from cinder.openstack.common import units
from cinder import test
-from cinder import units
from cinder.volume.drivers.sheepdog import SheepdogDriver
vendor_name='Open Source',
dirver_version=self.driver.VERSION,
storage_protocol='sheepdog',
- total_capacity_gb=float(107287605248) / units.GiB,
- free_capacity_gb=float(107287605248 - 3623897354) / units.GiB,
+ total_capacity_gb=float(107287605248) / units.Gi,
+ free_capacity_gb=float(107287605248 - 3623897354) / units.Gi,
reserved_percentage=0,
QoS_support=False)
actual = self.driver.get_volume_stats(True)
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
self.mox.StubOutWithMock(self.driver, '_resize')
- size = int(fake_size) * units.GiB
+ size = int(fake_size) * units.Gi
self.driver._resize(fake_vol, size=size)
self.mox.ReplayAll()
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
+from cinder.openstack.common import units
from cinder import test
-from cinder import units
from cinder.volume import configuration as conf
from cinder.volume.drivers.solidfire import SolidFireDriver
from cinder.volume import qos_specs
'name': test_name,
'accountID': 25,
'sliceCount': 1,
- 'totalSize': 1 * units.GiB,
+ 'totalSize': 1 * units.Gi,
'enable512e': True,
'access': "readWrite",
'status': "active",
from cinder.openstack.common import importutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
+from cinder.openstack.common import units
from cinder import test
from cinder.tests import utils as testutils
-from cinder import units
from cinder import utils
from cinder.volume import configuration as conf
from cinder.volume.drivers.ibm import storwize_svc
return self._errors['CMMVC5753E']
curr_size = int(self._volumes_list[vol_name]['capacity'])
- addition = size * units.GiB
+ addition = size * units.Gi
self._volumes_list[vol_name]['capacity'] = str(curr_size + addition)
return ('', '')
# Make sure volume attributes are as they should be
attributes = self.driver._helpers.get_vdisk_attributes(volume['name'])
- attr_size = float(attributes['capacity']) / units.GiB # bytes to GB
+ attr_size = float(attributes['capacity']) / units.Gi # bytes to GB
self.assertEqual(attr_size, float(volume['size']))
pool = self.driver.configuration.local_conf.storwize_svc_volpool_name
self.assertEqual(attributes['mdisk_grp_name'], pool)
volume = self._create_volume()
self.driver.extend_volume(volume, '13')
attrs = self.driver._helpers.get_vdisk_attributes(volume['name'])
- vol_size = int(attrs['capacity']) / units.GiB
+ vol_size = int(attrs['capacity']) / units.Gi
+
self.assertAlmostEqual(vol_size, 13)
snap = self._generate_vol_info(volume['name'], volume['id'])
from cinder import exception
from cinder.image import glance
+from cinder.openstack.common import units
from cinder import test
-from cinder import units
from cinder.volume import configuration
from cinder.volume.drivers.vmware import api
from cinder.volume.drivers.vmware import error_util
volumeops.get_dc.assert_called_once_with(rp)
volumeops.get_vmfolder.assert_called_once_with(mock.sentinel.dc)
driver._get_storage_profile.assert_called_once_with(volume)
- size = volume['size'] * units.GiB
+ size = volume['size'] * units.Gi
driver._select_datastore_summary.assert_called_once_with(size, dss)
def test_get_disk_type(self):
backing = FakeMor('VirtualMachine', 'my_back')
m.StubOutWithMock(self._volumeops, 'create_backing')
self._volumeops.create_backing(volume['name'],
- volume['size'] * units.MiB,
+ volume['size'] * units.Mi,
mox.IgnoreArg(), folder,
resource_pool, host,
mox.IgnoreArg(),
fake_context = mock.sentinel.context
fake_image_id = 'image-id'
fake_image_meta = {'disk_format': 'vmdk',
- 'size': 2 * units.GiB,
+ 'size': 2 * units.Gi,
'properties': {'vmware_disktype': 'preallocated'}}
image_service = mock.Mock(glance.GlanceImageService)
fake_size = 3
fake_context = mock.Mock()
fake_backing = mock.sentinel.backing
fake_image_id = 'image-id'
- size = 5 * units.GiB
- size_gb = float(size) / units.GiB
+ size = 5 * units.Gi
+ size_gb = float(size) / units.Gi
fake_volume_size = 1 + size_gb
fake_image_meta = {'disk_format': 'vmdk', 'size': size,
'properties': {'vmware_disktype':
volume = FakeObject()
volume['name'] = vol_name
size_gb = 5
- size = size_gb * units.GiB
+ size = size_gb * units.Gi
volume['size'] = size_gb
volume['project_id'] = project_id
volume['instance_uuid'] = None
self.VOLUME_FOLDER)
driver._get_storage_profile.assert_called_once_with(volume)
driver._filter_ds_by_profile.assert_called_once_with(dss, profile)
- size = volume['size'] * units.GiB
+ size = volume['size'] * units.Gi
driver._select_datastore_summary.assert_called_once_with(size,
filtered_dss)
import mock
+from cinder.openstack.common import units
from cinder import test
-from cinder import units
from cinder.volume.drivers.vmware import error_util
from cinder.volume.drivers.vmware import vim_util
from cinder.volume.drivers.vmware import volumeops
invoke_api.return_value = task
disk_mgr = self.session.vim.service_content.virtualDiskManager
fake_size = 5
- fake_size_in_kb = fake_size * units.MiB
+ fake_size_in_kb = fake_size * units.Mi
fake_name = 'fake_volume_0000000001'
fake_dc = mock.sentinel.datacenter
self.vops.extend_virtual_disk(fake_size,
from cinder.openstack.common import importutils
from cinder.openstack.common import jsonutils
from cinder.openstack.common import timeutils
+from cinder.openstack.common import units
import cinder.policy
from cinder import quota
from cinder import test
from cinder.tests.image import fake as fake_image
from cinder.tests.keymgr import fake as fake_keymgr
from cinder.tests import utils as tests_utils
-from cinder import units
from cinder import utils
import cinder.volume
from cinder.volume import configuration as conf
pass
def show(self, context, image_id):
- return {'size': 2 * units.GiB,
+ return {'size': 2 * units.Gi,
'disk_format': 'raw',
'container_format': 'bare',
'status': 'active'}
"""Verify that an image which is too big will fail correctly."""
class _ModifiedFakeImageService(FakeImageService):
def show(self, context, image_id):
- return {'size': 2 * units.GiB + 1,
+ return {'size': 2 * units.Gi + 1,
'disk_format': 'raw',
'container_format': 'bare',
'status': 'active'}
"""Verify volumes smaller than image minDisk will cause an error."""
class _ModifiedFakeImageService(FakeImageService):
def show(self, context, image_id):
- return {'size': 2 * units.GiB,
+ return {'size': 2 * units.Gi,
'disk_format': 'raw',
'container_format': 'bare',
'min_disk': 5,
"""Verify create volume from image will cause an error."""
class _ModifiedFakeImageService(FakeImageService):
def show(self, context, image_id):
- return {'size': 2 * units.GiB,
+ return {'size': 2 * units.Gi,
'disk_format': 'raw',
'container_format': 'bare',
'min_disk': 5,
from cinder.openstack.common import jsonutils
from cinder.openstack.common import lockutils
from cinder.openstack.common import log as logging
-from cinder import units
+from cinder.openstack.common import units
from cinder.volume import driver
from cinder.volume import volume_types
def to_coraid_kb(gb):
- return math.ceil(float(gb) * units.GiB / 1000)
+ return math.ceil(float(gb) * units.Gi / 1000)
def coraid_volume_size(gb):
from cinder import exception
from cinder.openstack.common import log as logging
-from cinder import units
+from cinder.openstack.common import units
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)
def create_volume(self, volume):
"""Creates a EMC(VMAX/VNX) volume."""
LOG.debug('Entering create_volume.')
- volumesize = int(volume['size']) * units.GiB
+ volumesize = int(volume['size']) * units.Gi
volumename = volume['name']
LOG.info(_('Create Volume: %(volume)s Size: %(size)lu')
def extend_volume(self, volume, new_size):
"""Extends an existing volume."""
LOG.debug('Entering extend_volume.')
- volumesize = int(new_size) * units.GiB
+ volumesize = int(new_size) * units.Gi
volumename = volume['name']
LOG.info(_('Extend Volume: %(volume)s New size: %(size)lu')
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
-from cinder import units
+from cinder.openstack.common import units
from cinder import utils
from cinder.volume.drivers import nfs
greatest_share = glusterfs_share
greatest_size = capacity
- if volume_size_for * units.GiB > greatest_size:
+ if volume_size_for * units.Gi > greatest_size:
raise exception.GlusterfsNoSuitableShareFound(
volume_size=volume_size_for)
return greatest_share
"""
from cinder.openstack.common import log as logging
-from cinder import units
+from cinder.openstack.common import units
from cinder import utils
import re
(inf[0], inf[1], inf[2], inf[3], inf[5], inf[7])
(availunit, usedunit) = (inf[4], inf[6])
if usedunit == 'GB':
- usedmultiplier = units.KiB
+ usedmultiplier = units.Ki
else:
- usedmultiplier = units.MiB
+ usedmultiplier = units.Mi
if availunit == 'GB':
- availmultiplier = units.KiB
+ availmultiplier = units.Ki
else:
- availmultiplier = units.MiB
+ availmultiplier = units.Mi
m = re.match("\((\d+)\%\)", perstr)
if m:
percent = m.group(1)
from cinder import exception
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
-from cinder import units
+from cinder.openstack.common import units
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.hds.hnas_backend import HnasBackend
if 'HDP' in line:
(hdp, size, _ign, used) = line.split()[1:5] # in MB
LOG.debug("stats: looking for: %s", hdp)
- if int(hdp) >= units.KiB: # HNAS fsid
+ if int(hdp) >= units.Ki: # HNAS fsid
hdp = line.split()[11]
if hdp in self.config['hdp'].keys():
total_cap += int(size)
LOG.info("stats: total: %d used: %d" % (total_cap, total_used))
hnas_stat = {}
- hnas_stat['total_capacity_gb'] = int(total_cap / units.KiB) # in GB
+ hnas_stat['total_capacity_gb'] = int(total_cap / units.Ki) # in GB
hnas_stat['free_capacity_gb'] = \
- int((total_cap - total_used) / units.KiB)
+ int((total_cap - total_used) / units.Ki)
be_name = self.configuration.safe_get('volume_backend_name')
hnas_stat["volume_backend_name"] = be_name or 'HDSISCSIDriver'
hnas_stat["vendor_name"] = 'HDS'
for line in out.split('\n'):
if 'HDP' in line:
inf = line.split()
- if int(inf[1]) >= units.KiB:
- # HDP fsids start at units.KiB (1024)
+ if int(inf[1]) >= units.Ki:
+ # HDP fsids start at units.Ki (1024)
hdp_list.append(inf[11])
else:
# HDP pools are 2-digits max
self.config['username'],
self.config['password'],
hdp,
- '%s' % (int(volume['size']) * units.KiB),
+ '%s' % (int(volume['size']) * units.Ki),
volume['name'])
LOG.info(_("create_volume: create_lu returns %s") % out)
raise exception.VolumeBackendAPIException(data=msg)
service = self._get_service(dst)
(_ip, _ipp, _ctl, _port, hdp, target, secret) = service
- size = int(src['size']) * units.KiB
+ size = int(src['size']) * units.Ki
source_vol = self._id_to_vol(src['id'])
(arid, slun) = _loc_info(source_vol['provider_location'])['id_lu']
out = self.bend.create_dup(self.config['hnas_cmd'],
self.config['username'],
self.config['password'],
hdp, lun,
- '%s' % (new_size * units.KiB),
+ '%s' % (new_size * units.Ki),
volume['name'])
LOG.info(_("LUN %(lun)s extended to %(size)s GB.")
:param snapshot: dictionary snapshot reference
"""
- size = int(snapshot['volume_size']) * units.KiB
+ size = int(snapshot['volume_size']) * units.Ki
(arid, slun) = _loc_info(snapshot['provider_location'])['id_lu']
service = self._get_service(volume)
(_ip, _ipp, _ctl, _port, hdp, target, secret) = service
source_vol = self._id_to_vol(snapshot['volume_id'])
service = self._get_service(source_vol)
(_ip, _ipp, _ctl, _port, hdp, target, secret) = service
- size = int(snapshot['volume_size']) * units.KiB
+ size = int(snapshot['volume_size']) * units.Ki
(arid, slun) = _loc_info(source_vol['provider_location'])['id_lu']
out = self.bend.create_dup(self.config['hnas_cmd'],
self.config['mgmt_ip0'],
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
-from cinder import units
+from cinder.openstack.common import units
from cinder.volume.drivers.hds.hnas_backend import HnasBackend
from cinder.volume.drivers import nfs
"""Checks if file size at path is equal to size."""
data = image_utils.qemu_img_info(path)
- virt_size = data.virtual_size / units.GiB
+ virt_size = data.virtual_size / units.Gi
if virt_size == size:
return True
from cinder import exception
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
-from cinder import units
+from cinder.openstack.common import units
from cinder import utils
from cinder.volume.drivers.huawei import huawei_utils
from cinder.volume import volume_types
calculates volume size with sectors, which is 512 bytes.
"""
- volume_size = units.GiB / 512 # 1G
+ volume_size = units.Gi / 512 # 1G
if int(volume['size']) != 0:
- volume_size = int(volume['size']) * units.GiB / 512
+ volume_size = int(volume['size']) * units.Gi / 512
return volume_size
lun_id = self._get_volume_by_name(name)
if lun_id:
url = self.url + "/lun/expand"
- capacity = int(new_size) * units.GiB / 512
+ capacity = int(new_size) * units.Gi / 512
data = json.dumps({"TYPE": "11",
"ID": lun_id,
"CAPACITY": capacity})
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
-from cinder import units
+from cinder.openstack.common import units
from cinder import utils
from cinder.volume import driver
"""Preallocate file blocks by writing zeros."""
block_size_mb = 1
- block_count = size * units.GiB / (block_size_mb * units.MiB)
+ block_count = size * units.Gi / (block_size_mb * units.Mi)
self._execute('dd', 'if=/dev/zero', 'of=%s' % path,
'bs=%dM' % block_size_mb,
self._set_rw_permission(volume_path)
self._gpfs_redirect(volume_path)
virt_size = self._resize_volume_file(volume, volume['size'])
- return {'size': math.ceil(virt_size / units.GiB)}
+ return {'size': math.ceil(virt_size / units.Gi)}
def create_cloned_volume(self, volume, src_vref):
"""Create a GPFS volume from another volume."""
self._create_gpfs_clone(src, dest)
self._set_rw_permission(dest)
virt_size = self._resize_volume_file(volume, volume['size'])
- return {'size': math.ceil(virt_size / units.GiB)}
+ return {'size': math.ceil(virt_size / units.Gi)}
def _delete_gpfs_file(self, fchild):
"""Delete a GPFS file and cleanup clone children."""
data["storage_protocol"] = 'file'
free, capacity = self._get_available_capacity(self.configuration.
gpfs_mount_point_base)
- data['total_capacity_gb'] = math.ceil(capacity / units.GiB)
- data['free_capacity_gb'] = math.ceil(free / units.GiB)
+ data['total_capacity_gb'] = math.ceil(capacity / units.Gi)
+ data['free_capacity_gb'] = math.ceil(free / units.Gi)
data['reserved_percentage'] = 0
data['QoS_support'] = False
data['storage_pool'] = self._storage_pool
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
-from cinder import units
+from cinder.openstack.common import units
from cinder import utils
from cinder.volume.drivers import nfs
from cinder.volume.drivers.nfs import nas_opts
global_capacity += capacity
global_free += free
- data['total_capacity_gb'] = global_capacity / float(units.GiB)
- data['free_capacity_gb'] = global_free / float(units.GiB)
+ data['total_capacity_gb'] = global_capacity / float(units.Gi)
+ data['free_capacity_gb'] = global_free / float(units.Gi)
data['reserved_percentage'] = 0
data['QoS_support'] = False
self._stats = data
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
-from cinder import units
+from cinder.openstack.common import units
from cinder import utils
from cinder.volume.drivers.ibm.storwize_svc import helpers as storwize_helpers
from cinder.volume.drivers.san import san
raise exception.ManageExistingInvalidReference(existing_ref=ref,
reason=reason)
- return int(math.ceil(float(vdisk['capacity']) / units.GiB))
+ return int(math.ceil(float(vdisk['capacity']) / units.Gi))
def get_volume_stats(self, refresh=False):
"""Get volume stats.
raise exception.VolumeBackendAPIException(data=exception_message)
data['total_capacity_gb'] = (float(attributes['capacity']) /
- units.GiB)
+ units.Gi)
data['free_capacity_gb'] = (float(attributes['free_capacity']) /
- units.GiB)
+ units.Gi)
data['easytier_support'] = attributes['easy_tier'] in ['on', 'auto']
data['compression_support'] = self._state['compression_enabled']
data['location_info'] = ('StorwizeSVCDriver:%(sys_id)s:%(pool)s' %
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
-from cinder import units
+from cinder.openstack.common import units
from cinder import utils
from cinder.volume import driver
from cinder.volume import utils as volutils
# clear_volume expects sizes in MiB, we store integer GiB
# be sure to convert before passing in
- vol_sz_in_meg = size_in_g * units.KiB
+ vol_sz_in_meg = size_in_g * units.Ki
volutils.clear_volume(
vol_sz_in_meg, dev_path,
# be sure to convert before passing in
volutils.copy_volume(self.local_path(snapshot),
self.local_path(volume),
- snapshot['volume_size'] * units.KiB,
+ snapshot['volume_size'] * units.Ki,
self.configuration.volume_dd_blocksize,
execute=self._execute)
volutils.copy_volume(
self.local_path(temp_snapshot),
self.local_path(volume),
- src_vref['size'] * units.KiB,
+ src_vref['size'] * units.Ki,
self.configuration.volume_dd_blocksize,
execute=self._execute)
finally:
from cinder import exception
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
-from cinder import units
+from cinder.openstack.common import units
from cinder import utils as cinder_utils
from cinder.volume import driver
from cinder.volume.drivers.netapp.eseries import client
def _get_sorted_avl_storage_pools(self, size_gb):
"""Returns storage pools sorted on available capacity."""
- size = size_gb * units.GiB
+ size = size_gb * units.Gi
pools = self._client.list_storage_pools()
sorted_pools = sorted(pools, key=lambda x:
(int(x.get('totalRaidedSpace', 0))
LOG.debug("Creating snap vol for group %s", group['label'])
image = self._get_cached_snap_grp_image(snapshot_id)
label = utils.convert_uuid_to_es_fmt(uuid.uuid4())
- capacity = int(image['pitCapacity']) / units.GiB
+ capacity = int(image['pitCapacity']) / units.Gi
storage_pools = self._get_sorted_avl_storage_pools(capacity)
s_id = storage_pools[0]['volumeGroupRef']
return self._client.create_snapshot_volume(image['pitRef'], label,
snap_grp, snap_image = None, None
snapshot_name = utils.convert_uuid_to_es_fmt(snapshot['id'])
vol = self._get_volume(snapshot['volume_id'])
- vol_size_gb = int(vol['totalSizeInBytes']) / units.GiB
+ vol_size_gb = int(vol['totalSizeInBytes']) / units.Gi
pools = self._get_sorted_avl_storage_pools(vol_size_gb)
try:
snap_grp = self._client.create_snapshot_group(
if pool['volumeGroupRef'] in self._objects['disk_pool_refs']:
tot_bytes = tot_bytes + int(pool.get('totalRaidedSpace', 0))
used_bytes = used_bytes + int(pool.get('usedSpace', 0))
- self._stats['free_capacity_gb'] = (tot_bytes - used_bytes) / units.GiB
- self._stats['total_capacity_gb'] = tot_bytes / units.GiB
+ self._stats['free_capacity_gb'] = (tot_bytes - used_bytes) / units.Gi
+ self._stats['total_capacity_gb'] = tot_bytes / units.Gi
def extend_volume(self, volume, new_size):
"""Extend an existing volume to the new size."""
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
-from cinder import units
+from cinder.openstack.common import units
from cinder import utils
from cinder.volume import driver
from cinder.volume.drivers.netapp.api import NaApiError
name = volume['name']
path = self.lun_table[name].metadata['Path']
curr_size_bytes = str(self.lun_table[name].size)
- new_size_bytes = str(int(new_size) * units.GiB)
+ new_size_bytes = str(int(new_size) * units.Gi)
# Reused by clone scenarios.
# Hence comparing the stored size.
if curr_size_bytes != new_size_bytes:
if self.ssc_vols['all']:
vol_max = max(self.ssc_vols['all'])
data['total_capacity_gb'] =\
- int(vol_max.space['size_total_bytes']) / units.GiB
+ int(vol_max.space['size_total_bytes']) / units.Gi
data['free_capacity_gb'] =\
- int(vol_max.space['size_avl_bytes']) / units.GiB
+ int(vol_max.space['size_avl_bytes']) / units.Gi
else:
data['total_capacity_gb'] = 0
data['free_capacity_gb'] = 0
avl_size = vol.get_child_content('size-available')
if avl_size:
free_bytes = free_bytes + int(avl_size)
- self.total_gb = total_bytes / units.GiB
- self.free_gb = free_bytes / units.GiB
+ self.total_gb = total_bytes / units.Gi
+ self.free_gb = free_bytes / units.Gi
def delete_volume(self, volume):
"""Driver entry point for destroying existing volumes."""
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
-from cinder import units
+from cinder.openstack.common import units
from cinder import utils
from cinder.volume.drivers.netapp.api import NaApiError
from cinder.volume.drivers.netapp.api import NaElement
def _is_file_size_equal(self, path, size):
"""Checks if file size at path is equal to size."""
data = image_utils.qemu_img_info(path)
- virt_size = data.virtual_size / units.GiB
+ virt_size = data.virtual_size / units.Gi
if virt_size == size:
return True
else:
if self.ssc_vols['all']:
vol_max = max(self.ssc_vols['all'])
data['total_capacity_gb'] =\
- int(vol_max.space['size_total_bytes']) / units.GiB
+ int(vol_max.space['size_total_bytes']) / units.Gi
data['free_capacity_gb'] =\
- int(vol_max.space['size_avl_bytes']) / units.GiB
+ int(vol_max.space['size_avl_bytes']) / units.Gi
else:
data['total_capacity_gb'] = 0
data['free_capacity_gb'] = 0
from cinder import db
from cinder import exception
from cinder.openstack.common import log as logging
-from cinder import units
+from cinder.openstack.common import units
from cinder.volume.drivers import nexenta
from cinder.volume.drivers.nexenta import jsonrpc
from cinder.volume.drivers.nexenta import options
:param size: size of file
"""
block_size_mb = 1
- block_count = size * units.GiB / (block_size_mb * units.MiB)
+ block_count = size * units.Gi / (block_size_mb * units.Mi)
LOG.info(_('Creating regular file: %s.'
'This may take some time.') % path)
import six.moves.urllib.parse as urlparse
-from cinder import units
+from cinder.openstack.common import units
def str2size(s, scale=1024):
def str2gib_size(s):
"""Covert size-string to size in gigabytes."""
size_in_bytes = str2size(s)
- return size_in_bytes / units.GiB
+ return size_in_bytes / units.Gi
def get_rrmgr_cmd(src, dst, compression=None, tcp_buf_size=None,
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils as putils
-from cinder import units
+from cinder.openstack.common import units
from cinder import utils
from cinder.volume import driver
"""
block_size_mb = 1
- block_count = size * units.GiB / (block_size_mb * units.MiB)
+ block_count = size * units.Gi / (block_size_mb * units.Mi)
self._execute('dd', 'if=/dev/zero', 'of=%s' % path,
'bs=%dM' % block_size_mb,
self._execute('qemu-img', 'create', '-f', 'qcow2',
'-o', 'preallocation=metadata',
- path, str(size_gb * units.GiB),
+ path, str(size_gb * units.Gi),
run_as_root=True)
def _set_rw_permissions_for_all(self, path):
image_utils.resize_image(self.local_path(volume), volume['size'])
data = image_utils.qemu_img_info(self.local_path(volume))
- virt_size = data.virtual_size / units.GiB
+ virt_size = data.virtual_size / units.Gi
if virt_size != volume['size']:
raise exception.ImageUnacceptable(
image_id=image_id,
global_capacity += capacity
global_free += free
- data['total_capacity_gb'] = global_capacity / float(units.GiB)
- data['free_capacity_gb'] = global_free / float(units.GiB)
+ data['total_capacity_gb'] = global_capacity / float(units.Gi)
+ data['free_capacity_gb'] = global_free / float(units.Gi)
data['reserved_percentage'] = 0
data['QoS_support'] = False
self._stats = data
used_ratio = self.configuration.nfs_used_ratio
oversub_ratio = self.configuration.nfs_oversub_ratio
- requested_volume_size = volume_size_in_gib * units.GiB
+ requested_volume_size = volume_size_in_gib * units.Gi
total_size, total_available, total_allocated = \
self._get_capacity_info(nfs_share)
def _is_file_size_equal(self, path, size):
"""Checks if file size at path is equal to size."""
data = image_utils.qemu_img_info(path)
- virt_size = data.virtual_size / units.GiB
+ virt_size = data.virtual_size / units.Gi
return virt_size == size
from cinder.openstack.common import fileutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import strutils
-from cinder import units
+from cinder.openstack.common import units
from cinder.volume import driver
try:
try:
with RADOSClient(self) as client:
new_stats = client.cluster.get_cluster_stats()
- stats['total_capacity_gb'] = new_stats['kb'] / units.MiB
- stats['free_capacity_gb'] = new_stats['kb_avail'] / units.MiB
+ stats['total_capacity_gb'] = new_stats['kb'] / units.Mi
+ stats['free_capacity_gb'] = new_stats['kb_avail'] / units.Mi
except self.rados.Error:
# just log and return unknown capacities
LOG.exception(_('error refreshing volume stats'))
def create_volume(self, volume):
"""Creates a logical volume."""
if int(volume['size']) == 0:
- size = 100 * units.MiB
+ size = 100 * units.Mi
else:
- size = int(volume['size']) * units.GiB
+ size = int(volume['size']) * units.Gi
LOG.debug("creating volume '%s'" % (volume['name']))
old_format = True
features = 0
- chunk_size = CONF.rbd_store_chunk_size * units.MiB
+ chunk_size = CONF.rbd_store_chunk_size * units.Mi
order = int(math.log(chunk_size, 2))
if self._supports_layering():
old_format = False
def _resize(self, volume, **kwargs):
size = kwargs.get('size', None)
if not size:
- size = int(volume['size']) * units.GiB
+ size = int(volume['size']) * units.Gi
with RBDVolumeProxy(self, volume['name']) as vol:
vol.resize(size)
self.delete_volume(volume)
- chunk_size = CONF.rbd_store_chunk_size * units.MiB
+ chunk_size = CONF.rbd_store_chunk_size * units.Mi
order = int(math.log(chunk_size, 2))
# keep using the command line import instead of librbd since it
# detects zeroes to preserve sparseness in the image
old_size = volume['size']
try:
- size = int(new_size) * units.GiB
+ size = int(new_size) * units.Gi
self._resize(volume, size=size)
except Exception:
msg = _('Failed to Extend Volume '
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
-from cinder import units
+from cinder.openstack.common import units
from cinder.volume import qos_specs
from cinder.volume import volume_types
" by %(diff)s GB." %
{'vol': volume_name, 'old': old_size, 'new': new_size,
'diff': growth_size})
- growth_size_mib = growth_size * units.KiB
+ growth_size_mib = growth_size * units.Ki
self._extend_volume(volume, volume_name, growth_size_mib)
def _extend_volume(self, volume, volume_name, growth_size_mib,
if min_io is None:
qosRule['ioMinGoal'] = int(max_io)
if min_bw:
- qosRule['bwMinGoalKB'] = int(min_bw) * units.KiB
+ qosRule['bwMinGoalKB'] = int(min_bw) * units.Ki
if max_bw is None:
- qosRule['bwMaxLimitKB'] = int(min_bw) * units.KiB
+ qosRule['bwMaxLimitKB'] = int(min_bw) * units.Ki
if max_bw:
- qosRule['bwMaxLimitKB'] = int(max_bw) * units.KiB
+ qosRule['bwMaxLimitKB'] = int(max_bw) * units.Ki
if min_bw is None:
- qosRule['bwMinGoalKB'] = int(max_bw) * units.KiB
+ qosRule['bwMinGoalKB'] = int(max_bw) * units.Ki
if latency:
qosRule['latencyGoal'] = int(latency)
if priority:
LOG.debug('Converting to base volume type: %s.' %
volume['id'])
self._convert_to_base_volume(volume)
- growth_size_mib = growth_size * units.GiB / units.MiB
+ growth_size_mib = growth_size * units.Gi / units.Mi
LOG.debug('Growing volume: %(id)s by %(size)s GiB.' %
{'id': volume['id'], 'size': growth_size})
self.client.growVolume(volume_name, growth_size_mib)
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
-from cinder import units
+from cinder.openstack.common import units
from cinder.volume.drivers.san.san import SanISCSIDriver
cluster_node = result_xml.find("response/cluster")
total_capacity = cluster_node.attrib.get("spaceTotal")
free_capacity = cluster_node.attrib.get("unprovisionedSpace")
- GB = units.GiB
+ GB = units.Gi
data['total_capacity_gb'] = int(total_capacity) / GB
data['free_capacity_gb'] = int(free_capacity) / GB
from cinder import context
from cinder import exception
from cinder.openstack.common import log as logging
-from cinder import units
+from cinder.openstack.common import units
from cinder import utils
from cinder.volume.driver import ISCSIDriver
from cinder.volume import volume_types
volume_info = self.client.createVolume(
volume['name'], self.cluster_id,
- volume['size'] * units.GiB,
+ volume['size'] * units.Gi,
optional)
return self._update_provider(volume_info)
volume_info = self.client.getVolumeByName(volume['name'])
# convert GB to bytes
- options = {'size': int(new_size) * units.GiB}
+ options = {'size': int(new_size) * units.Gi}
self.client.modifyVolume(volume_info['id'], options)
except Exception as ex:
raise exception.VolumeBackendAPIException(ex)
free_capacity = cluster_info['spaceAvailable']
# convert to GB
- data['total_capacity_gb'] = int(total_capacity) / units.GiB
- data['free_capacity_gb'] = int(free_capacity) / units.GiB
+ data['total_capacity_gb'] = int(total_capacity) / units.Gi
+ data['free_capacity_gb'] = int(free_capacity) / units.Gi
self.device_stats = data
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common import log as logging
-from cinder import units
+from cinder.openstack.common import units
from cinder.volume import driver
def _size_bytes(self, size_in_g):
if int(size_in_g) == 0:
- return 100 * units.MiB
- return int(size_in_g) * units.GiB
+ return 100 * units.Mi
+ return int(size_in_g) * units.Gi
def _create_file(self, path, size):
with open(path, "ab") as f:
from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
-from cinder import units
+from cinder.openstack.common import units
from cinder.volume import driver
def _resize(self, volume, size=None):
if not size:
- size = int(volume['size']) * units.GiB
+ size = int(volume['size']) * units.Gi
self._try_execute('collie', 'vdi', 'resize',
volume['name'], size)
m = self.stats_pattern.match(stdout)
total = float(m.group(1))
used = float(m.group(2))
- stats['total_capacity_gb'] = total / units.GiB
- stats['free_capacity_gb'] = (total - used) / units.GiB
+ stats['total_capacity_gb'] = total / units.Gi
+ stats['free_capacity_gb'] = (total - used) / units.Gi
except processutils.ProcessExecutionError:
LOG.exception(_('error refreshing volume stats'))
old_size = volume['size']
try:
- size = int(new_size) * units.GiB
+ size = int(new_size) * units.Gi
self._resize(volume, size=size)
except Exception:
msg = _('Failed to Extend Volume '
from cinder import exception
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
-from cinder import units
+from cinder.openstack.common import units
from cinder.volume.drivers.san.san import SanISCSIDriver
from cinder.volume import qos_specs
from cinder.volume import volume_types
params = {'volumeID': int(sf_vol['volumeID']),
'name': 'UUID-%s' % v_ref['id'],
- 'newSize': int(new_size * units.GiB),
+ 'newSize': int(new_size * units.Gi),
'newAccountID': sfaccount['accountID']}
data = self._issue_api_request('CloneVolume', params)
params = {'name': 'UUID-%s' % volume['id'],
'accountID': None,
'sliceCount': slice_count,
- 'totalSize': int(volume['size'] * units.GiB),
+ 'totalSize': int(volume['size'] * units.Gi),
'enable512e': self.configuration.sf_emulate_512,
'attributes': attributes,
'qos': qos}
params = {
'volumeID': sf_vol['volumeID'],
- 'totalSize': int(new_size * units.GiB)
+ 'totalSize': int(new_size * units.Gi)
}
data = self._issue_api_request('ModifyVolume',
params, version='5.0')
data["storage_protocol"] = 'iSCSI'
data['total_capacity_gb'] =\
- float(results['maxProvisionedSpace'] / units.GiB)
+ float(results['maxProvisionedSpace'] / units.Gi)
- data['free_capacity_gb'] = float(free_capacity / units.GiB)
+ data['free_capacity_gb'] = float(free_capacity / units.Gi)
data['reserved_percentage'] = self.configuration.reserved_percentage
data['QoS_support'] = True
data['compression_percent'] =\
from cinder import exception
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
-from cinder import units
+from cinder.openstack.common import units
from cinder.volume import driver
from cinder.volume.drivers.vmware import api
from cinder.volume.drivers.vmware import error_util
"volume since policy based placement is "
"disabled."), storage_profile)
- size_bytes = volume['size'] * units.GiB
+ size_bytes = volume['size'] * units.Gi
datastore_summary = self._select_datastore_summary(size_bytes,
datastores)
return (folder, datastore_summary)
resource_pool,
datastores)
disk_type = VMwareEsxVmdkDriver._get_disk_type(volume)
- size_kb = volume['size'] * units.MiB
+ size_kb = volume['size'] * units.Mi
storage_profile = self._get_storage_profile(volume)
profileId = None
if self._storage_policy_enabled and storage_profile:
"streamOptimized"
"""
# Set volume size in GB from image metadata
- volume['size'] = float(image_size) / units.GiB
+ volume['size'] = float(image_size) / units.Gi
# First create empty backing in the inventory
backing = self._create_backing_in_inventory(volume)
# image size. If the volume_size_in_gb is greater, meaning the
# user specifies a larger volume, we need to extend/resize the vmdk
# virtual disk to the capacity specified by the user.
- if volume_size_in_gb * units.GiB > image_size_in_bytes:
+ if volume_size_in_gb * units.Gi > image_size_in_bytes:
self._extend_vmdk_virtual_disk(volume['name'], volume_size_in_gb)
def copy_volume_to_image(self, context, volume, image_service, image_meta):
host=host_ip,
vm=backing,
vmdk_file_path=vmdk_file_path,
- vmdk_size=volume['size'] * units.GiB,
+ vmdk_size=volume['size'] * units.Gi,
image_name=image_meta['name'],
image_version=1)
LOG.info(_("Done copying volume %(vol)s to a new image %(img)s") %
"""
from cinder.openstack.common import log as logging
-from cinder import units
+from cinder.openstack.common import units
from cinder.volume.drivers.vmware import error_util
from cinder.volume.drivers.vmware import vim_util
# VMWare API needs the capacity unit to be in KB, so convert the
# capacity unit from GB to KB.
- size_in_kb = requested_size_in_gb * units.MiB
+ size_in_kb = requested_size_in_gb * units.Mi
task = self._session.invoke_api(self._session.vim,
"ExtendVirtualDisk_Task",
diskMgr,
import os
import pickle
-from cinder import units
+from cinder.openstack.common import units
from cinder.volume.drivers.xenapi import tools
def to_bytes(size_in_gigs):
- return size_in_gigs * units.GiB
+ return size_in_gigs * units.Gi
class NFSOperationsMixIn(CompoundOperations):
from cinder import flow_utils
from cinder.openstack.common import log as logging
from cinder.openstack.common import timeutils
+from cinder.openstack.common import units
from cinder import policy
from cinder import quota
-from cinder import units
from cinder import utils
from cinder.volume.flows import common
from cinder.volume import volume_types
ACTION = 'volume:create'
CONF = cfg.CONF
-GB = units.GiB
+GB = units.Gi
QUOTAS = quota.QUOTAS
# Only in these 'sources' status can we attempt to create a volume from a
from cinder.openstack.common import log as logging
from cinder.openstack.common import processutils
from cinder.openstack.common import strutils
+from cinder.openstack.common import units
from cinder import rpc
-from cinder import units
from cinder import utils
blocksize = CONF.volume_dd_blocksize
bs = strutils.string_to_bytes('%sB' % blocksize)
- count = math.ceil(size_in_m * units.MiB / bs)
+ count = math.ceil(size_in_m * units.Mi / bs)
return blocksize, int(count)
module=sslutils
module=strutils
module=timeutils
+module=units
module=uuidutils
module=versionutils