from oslo_utils import excutils
from oslo_utils import units
-from cinder.backup.driver import BackupDriver
+from cinder.backup import driver
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
LOG.debug(msg)
-class CephBackupDriver(BackupDriver):
+class CephBackupDriver(driver.BackupDriver):
"""Backup Cinder volumes to Ceph Object Store.
This class enables backing up Cinder volumes to a Ceph object store.
import six
from swiftclient import client as swift
-from cinder.backup.driver import BackupDriver
+from cinder.backup import driver
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
CONF.register_opts(swiftbackup_service_opts)
-class SwiftBackupDriver(BackupDriver):
+class SwiftBackupDriver(driver.BackupDriver):
"""Provides backup, restore and delete of backup objects within Swift."""
DRIVER_VERSION = '1.0.0'
from oslo_concurrency import processutils
from oslo_config import cfg
-from cinder.backup.driver import BackupDriver
+from cinder.backup import driver
from cinder import exception
from cinder.i18n import _LE, _
from cinder.openstack.common import log as logging
LOG.error(err)
-class TSMBackupDriver(BackupDriver):
+class TSMBackupDriver(driver.BackupDriver):
"""Provides backup, restore and delete of volumes backup for TSM."""
DRIVER_VERSION = '1.0.0'
from __future__ import print_function
-from datetime import datetime
+import datetime
import sys
import traceback
import warnings
rpc.init(CONF)
begin, end = utils.last_completed_audit_period()
if CONF.start_time:
- begin = datetime.strptime(CONF.start_time, "%Y-%m-%d %H:%M:%S")
+ begin = datetime.datetime.strptime(CONF.start_time,
+ "%Y-%m-%d %H:%M:%S")
if CONF.end_time:
- end = datetime.strptime(CONF.end_time, "%Y-%m-%d %H:%M:%S")
+ end = datetime.datetime.strptime(CONF.end_time,
+ "%Y-%m-%d %H:%M:%S")
if not end > begin:
msg = _("The end time (%(end)s) must be after the start "
"time (%(start)s).") % {'start': begin,
"""Implementation of SQLAlchemy backend."""
-from datetime import datetime
-from datetime import timedelta
+import datetime as dt
import functools
import sys
import threading
LOG.info(_LI('Purging deleted rows older than age=%(age)d days '
'from table=%(table)s'), {'age': age_in_days,
'table': table})
- deleted_age = datetime.now() - timedelta(days=age_in_days)
+ deleted_age = dt.datetime.now() - dt.timedelta(days=age_in_days)
try:
with session.begin():
result = session.execute(
# under the License.
-from datetime import datetime
+import datetime
from oslo_utils import timeutils
import webob.exc
from cinder.tests.api import fakes
-fake_services_list = [{'binary': 'cinder-scheduler',
- 'host': 'host1',
- 'availability_zone': 'cinder',
- 'id': 1,
- 'disabled': True,
- 'updated_at': datetime(2012, 10, 29, 13, 42, 2),
- 'created_at': datetime(2012, 9, 18, 2, 46, 27),
- 'disabled_reason': 'test1'},
- {'binary': 'cinder-volume',
- 'host': 'host1',
- 'availability_zone': 'cinder',
- 'id': 2,
- 'disabled': True,
- 'updated_at': datetime(2012, 10, 29, 13, 42, 5),
- 'created_at': datetime(2012, 9, 18, 2, 46, 27),
- 'disabled_reason': 'test2'},
- {'binary': 'cinder-scheduler',
- 'host': 'host2',
- 'availability_zone': 'cinder',
- 'id': 3,
- 'disabled': False,
- 'updated_at': datetime(2012, 9, 19, 6, 55, 34),
- 'created_at': datetime(2012, 9, 18, 2, 46, 28),
- 'disabled_reason': ''},
- {'binary': 'cinder-volume',
- 'host': 'host2',
- 'availability_zone': 'cinder',
- 'id': 4,
- 'disabled': True,
- 'updated_at': datetime(2012, 9, 18, 8, 3, 38),
- 'created_at': datetime(2012, 9, 18, 2, 46, 28),
- 'disabled_reason': 'test4'},
- ]
+fake_services_list = [
+ {'binary': 'cinder-scheduler',
+ 'host': 'host1',
+ 'availability_zone': 'cinder',
+ 'id': 1,
+ 'disabled': True,
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
+ 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 27),
+ 'disabled_reason': 'test1'},
+ {'binary': 'cinder-volume',
+ 'host': 'host1',
+ 'availability_zone': 'cinder',
+ 'id': 2,
+ 'disabled': True,
+ 'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 5),
+ 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 27),
+ 'disabled_reason': 'test2'},
+ {'binary': 'cinder-scheduler',
+ 'host': 'host2',
+ 'availability_zone': 'cinder',
+ 'id': 3,
+ 'disabled': False,
+ 'updated_at': datetime.datetime(2012, 9, 19, 6, 55, 34),
+ 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28),
+ 'disabled_reason': ''},
+ {'binary': 'cinder-volume',
+ 'host': 'host2',
+ 'availability_zone': 'cinder',
+ 'id': 4,
+ 'disabled': True,
+ 'updated_at': datetime.datetime(2012, 9, 18, 8, 3, 38),
+ 'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28),
+ 'disabled_reason': 'test4'}, ]
class FakeRequest(object):
def fake_utcnow():
- return datetime(2012, 10, 29, 13, 42, 11)
+ return datetime.datetime(2012, 10, 29, 13, 42, 11)
class ServicesTest(test.TestCase):
response = {'services': [{'binary': 'cinder-scheduler',
'host': 'host1', 'zone': 'cinder',
'status': 'disabled', 'state': 'up',
- 'updated_at': datetime(
+ 'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 2)},
{'binary': 'cinder-volume',
'host': 'host1', 'zone': 'cinder',
'status': 'disabled', 'state': 'up',
- 'updated_at': datetime(
+ 'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 5)},
{'binary': 'cinder-scheduler',
'host': 'host2',
'zone': 'cinder',
'status': 'enabled', 'state': 'down',
- 'updated_at': datetime(
+ 'updated_at': datetime.datetime(
2012, 9, 19, 6, 55, 34)},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled', 'state': 'down',
- 'updated_at': datetime(
+ 'updated_at': datetime.datetime(
2012, 9, 18, 8, 3, 38)}]}
self.assertEqual(res_dict, response)
response = {'services': [{'binary': 'cinder-scheduler',
'host': 'host1', 'zone': 'cinder',
'status': 'disabled', 'state': 'up',
- 'updated_at': datetime(
+ 'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 2),
'disabled_reason': 'test1'},
{'binary': 'cinder-volume',
'host': 'host1', 'zone': 'cinder',
'status': 'disabled', 'state': 'up',
- 'updated_at': datetime(
+ 'updated_at': datetime.datetime(
2012, 10, 29, 13, 42, 5),
'disabled_reason': 'test2'},
{'binary': 'cinder-scheduler',
'host': 'host2',
'zone': 'cinder',
'status': 'enabled', 'state': 'down',
- 'updated_at': datetime(
+ 'updated_at': datetime.datetime(
2012, 9, 19, 6, 55, 34),
'disabled_reason': ''},
{'binary': 'cinder-volume',
'host': 'host2',
'zone': 'cinder',
'status': 'disabled', 'state': 'down',
- 'updated_at': datetime(
+ 'updated_at': datetime.datetime(
2012, 9, 18, 8, 3, 38),
'disabled_reason': 'test4'}]}
self.assertEqual(res_dict, response)
req = FakeRequestWithHost()
res_dict = self.controller.index(req)
- response = {'services': [{'binary': 'cinder-scheduler',
- 'host': 'host1',
- 'zone': 'cinder',
- 'status': 'disabled', 'state': 'up',
- 'updated_at': datetime(2012, 10,
- 29, 13, 42, 2)},
- {'binary': 'cinder-volume',
- 'host': 'host1',
- 'zone': 'cinder',
- 'status': 'disabled', 'state': 'up',
- 'updated_at': datetime(2012, 10, 29,
- 13, 42, 5)}]}
+ response = {'services': [
+ {'binary': 'cinder-scheduler',
+ 'host': 'host1',
+ 'zone': 'cinder',
+ 'status': 'disabled', 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10,
+ 29, 13, 42, 2)},
+ {'binary': 'cinder-volume',
+ 'host': 'host1',
+ 'zone': 'cinder',
+ 'status': 'disabled', 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29,
+ 13, 42, 5)}]}
self.assertEqual(res_dict, response)
def test_services_detail_with_host(self):
req = FakeRequestWithHost()
res_dict = self.controller.index(req)
- response = {'services': [{'binary': 'cinder-scheduler',
- 'host': 'host1',
- 'zone': 'cinder',
- 'status': 'disabled', 'state': 'up',
- 'updated_at': datetime(2012, 10,
- 29, 13, 42, 2),
- 'disabled_reason': 'test1'},
- {'binary': 'cinder-volume',
- 'host': 'host1',
- 'zone': 'cinder',
- 'status': 'disabled', 'state': 'up',
- 'updated_at': datetime(2012, 10, 29,
- 13, 42, 5),
- 'disabled_reason': 'test2'}]}
+ response = {'services': [
+ {'binary': 'cinder-scheduler',
+ 'host': 'host1',
+ 'zone': 'cinder',
+ 'status': 'disabled', 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10,
+ 29, 13, 42, 2),
+ 'disabled_reason': 'test1'},
+ {'binary': 'cinder-volume',
+ 'host': 'host1',
+ 'zone': 'cinder',
+ 'status': 'disabled', 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29,
+ 13, 42, 5),
+ 'disabled_reason': 'test2'}]}
self.assertEqual(res_dict, response)
def test_services_list_with_service(self):
req = FakeRequestWithService()
res_dict = self.controller.index(req)
- response = {'services': [{'binary': 'cinder-volume',
- 'host': 'host1',
- 'zone': 'cinder',
- 'status': 'disabled',
- 'state': 'up',
- 'updated_at': datetime(2012, 10, 29,
- 13, 42, 5)},
- {'binary': 'cinder-volume',
- 'host': 'host2',
- 'zone': 'cinder',
- 'status': 'disabled',
- 'state': 'down',
- 'updated_at': datetime(2012, 9, 18,
- 8, 3, 38)}]}
+ response = {'services': [
+ {'binary': 'cinder-volume',
+ 'host': 'host1',
+ 'zone': 'cinder',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29,
+ 13, 42, 5)},
+ {'binary': 'cinder-volume',
+ 'host': 'host2',
+ 'zone': 'cinder',
+ 'status': 'disabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 18,
+ 8, 3, 38)}]}
self.assertEqual(res_dict, response)
def test_services_detail_with_service(self):
req = FakeRequestWithService()
res_dict = self.controller.index(req)
- response = {'services': [{'binary': 'cinder-volume',
- 'host': 'host1',
- 'zone': 'cinder',
- 'status': 'disabled',
- 'state': 'up',
- 'updated_at': datetime(2012, 10, 29,
- 13, 42, 5),
- 'disabled_reason': 'test2'},
- {'binary': 'cinder-volume',
- 'host': 'host2',
- 'zone': 'cinder',
- 'status': 'disabled',
- 'state': 'down',
- 'updated_at': datetime(2012, 9, 18,
- 8, 3, 38),
- 'disabled_reason': 'test4'}]}
+ response = {'services': [
+ {'binary': 'cinder-volume',
+ 'host': 'host1',
+ 'zone': 'cinder',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29,
+ 13, 42, 5),
+ 'disabled_reason': 'test2'},
+ {'binary': 'cinder-volume',
+ 'host': 'host2',
+ 'zone': 'cinder',
+ 'status': 'disabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 18,
+ 8, 3, 38),
+ 'disabled_reason': 'test4'}]}
self.assertEqual(res_dict, response)
def test_services_list_with_binary(self):
req = FakeRequestWithBinary()
res_dict = self.controller.index(req)
- response = {'services': [{'binary': 'cinder-volume',
- 'host': 'host1',
- 'zone': 'cinder',
- 'status': 'disabled',
- 'state': 'up',
- 'updated_at': datetime(2012, 10, 29,
- 13, 42, 5)},
- {'binary': 'cinder-volume',
- 'host': 'host2',
- 'zone': 'cinder',
- 'status': 'disabled',
- 'state': 'down',
- 'updated_at': datetime(2012, 9, 18,
- 8, 3, 38)}]}
+ response = {'services': [
+ {'binary': 'cinder-volume',
+ 'host': 'host1',
+ 'zone': 'cinder',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29,
+ 13, 42, 5)},
+ {'binary': 'cinder-volume',
+ 'host': 'host2',
+ 'zone': 'cinder',
+ 'status': 'disabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 18,
+ 8, 3, 38)}]}
self.assertEqual(res_dict, response)
def test_services_detail_with_binary(self):
req = FakeRequestWithBinary()
res_dict = self.controller.index(req)
- response = {'services': [{'binary': 'cinder-volume',
- 'host': 'host1',
- 'zone': 'cinder',
- 'status': 'disabled',
- 'state': 'up',
- 'updated_at': datetime(2012, 10, 29,
- 13, 42, 5),
- 'disabled_reason': 'test2'},
- {'binary': 'cinder-volume',
- 'host': 'host2',
- 'zone': 'cinder',
- 'status': 'disabled',
- 'state': 'down',
- 'updated_at': datetime(2012, 9, 18,
- 8, 3, 38),
- 'disabled_reason': 'test4'}]}
+ response = {'services': [
+ {'binary': 'cinder-volume',
+ 'host': 'host1',
+ 'zone': 'cinder',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29,
+ 13, 42, 5),
+ 'disabled_reason': 'test2'},
+ {'binary': 'cinder-volume',
+ 'host': 'host2',
+ 'zone': 'cinder',
+ 'status': 'disabled',
+ 'state': 'down',
+ 'updated_at': datetime.datetime(2012, 9, 18,
+ 8, 3, 38),
+ 'disabled_reason': 'test4'}]}
self.assertEqual(res_dict, response)
def test_services_list_with_host_service(self):
req = FakeRequestWithHostService()
res_dict = self.controller.index(req)
- response = {'services': [{'binary': 'cinder-volume',
- 'host': 'host1',
- 'zone': 'cinder',
- 'status': 'disabled',
- 'state': 'up',
- 'updated_at': datetime(2012, 10, 29,
- 13, 42, 5)}]}
+ response = {'services': [
+ {'binary': 'cinder-volume',
+ 'host': 'host1',
+ 'zone': 'cinder',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29,
+ 13, 42, 5)}]}
self.assertEqual(res_dict, response)
def test_services_detail_with_host_service(self):
req = FakeRequestWithHostService()
res_dict = self.controller.index(req)
- response = {'services': [{'binary': 'cinder-volume',
- 'host': 'host1',
- 'zone': 'cinder',
- 'status': 'disabled',
- 'state': 'up',
- 'updated_at': datetime(2012, 10, 29,
- 13, 42, 5),
- 'disabled_reason': 'test2'}]}
+ response = {'services': [
+ {'binary': 'cinder-volume',
+ 'host': 'host1',
+ 'zone': 'cinder',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29,
+ 13, 42, 5),
+ 'disabled_reason': 'test2'}]}
self.assertEqual(res_dict, response)
def test_services_list_with_host_binary(self):
req = FakeRequestWithHostBinary()
res_dict = self.controller.index(req)
- response = {'services': [{'binary': 'cinder-volume',
- 'host': 'host1',
- 'zone': 'cinder',
- 'status': 'disabled',
- 'state': 'up',
- 'updated_at': datetime(2012, 10, 29,
- 13, 42, 5)}]}
+ response = {'services': [
+ {'binary': 'cinder-volume',
+ 'host': 'host1',
+ 'zone': 'cinder',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29,
+ 13, 42, 5)}]}
self.assertEqual(res_dict, response)
def test_services_detail_with_host_binary(self):
req = FakeRequestWithHostBinary()
res_dict = self.controller.index(req)
- response = {'services': [{'binary': 'cinder-volume',
- 'host': 'host1',
- 'zone': 'cinder',
- 'status': 'disabled',
- 'state': 'up',
- 'updated_at': datetime(2012, 10, 29,
- 13, 42, 5),
- 'disabled_reason': 'test2'}]}
+ response = {'services': [
+ {'binary': 'cinder-volume',
+ 'host': 'host1',
+ 'zone': 'cinder',
+ 'status': 'disabled',
+ 'state': 'up',
+ 'updated_at': datetime.datetime(2012, 10, 29,
+ 13, 42, 5),
+ 'disabled_reason': 'test2'}]}
self.assertEqual(res_dict, response)
def test_services_enable_with_service_key(self):
from cinder.api.contrib import volume_actions
from cinder import exception
-from cinder.image.glance import GlanceImageService
+from cinder.image import glance
from cinder import test
from cinder.tests.api import fakes
from cinder.tests.api.v2 import stubs
mock_get_volume_image_metadata.side_effect = \
fake_get_volume_image_metadata
- with mock.patch.object(GlanceImageService, "create") \
+ with mock.patch.object(glance.GlanceImageService, "create") \
as mock_create:
mock_create.side_effect = self.fake_image_service_create
mock_get_volume_image_metadata.side_effect = \
fake_get_volume_image_metadata_raise
- with mock.patch.object(GlanceImageService, "create") \
+ with mock.patch.object(glance.GlanceImageService, "create") \
as mock_create:
mock_create.side_effect = self.fake_image_service_create
mock_get_volume_image_metadata.side_effect = \
fake_get_volume_image_metadata
- with mock.patch.object(GlanceImageService, "create") \
+ with mock.patch.object(glance.GlanceImageService, "create") \
as mock_create:
mock_create.side_effect = self.fake_image_service_create
id = 1
# Need to mock create, update, copy_volume_to_image
- with mock.patch.object(GlanceImageService, "create") \
+ with mock.patch.object(glance.GlanceImageService, "create") \
as mock_create:
mock_create.side_effect = self.fake_image_service_create
import webob
from cinder.api import common
-from cinder.api.openstack.wsgi import MetadataXMLDeserializer
-from cinder.api.openstack.wsgi import XMLDeserializer
+from cinder.api.openstack import wsgi
from cinder import db
from cinder import test
from cinder.tests.api import fakes
content_type = 'application/xml'
def _get_image_metadata(self, body):
- deserializer = XMLDeserializer()
+ deserializer = wsgi.XMLDeserializer()
volume = deserializer.find_first_child_named(
minidom.parseString(body), 'volume')
image_metadata = deserializer.find_first_child_named(
volume, 'volume_image_metadata')
- return MetadataXMLDeserializer().extract_metadata(image_metadata)
+ return wsgi.MetadataXMLDeserializer().extract_metadata(image_metadata)
def _get_image_metadata_list(self, body):
- deserializer = XMLDeserializer()
+ deserializer = wsgi.XMLDeserializer()
volumes = deserializer.find_first_child_named(
minidom.parseString(body), 'volumes')
volume_list = deserializer.find_children_named(volumes, 'volume')
volume, 'volume_image_metadata'
)
for volume in volume_list]
- return map(MetadataXMLDeserializer().extract_metadata,
+ return map(wsgi.MetadataXMLDeserializer().extract_metadata,
image_metadata_list)
from cinder.openstack.common import log as logging
from cinder import test
from cinder.tests.api import fakes
-from cinder.transfer import API
+from cinder import transfer
import cinder.volume
def setUp(self):
super(VolumeTransferAPITestCase, self).setUp()
- self.volume_transfer_api = API()
+ self.volume_transfer_api = transfer.API()
self.controller = volume_transfer.VolumeTransferController()
def _create_transfer(self, volume_id=1,
# License for the specific language governing permissions and limitations
# under the License.
-from cinder.backup.driver import BackupDriver
+from cinder.backup import driver
from cinder.openstack.common import log as logging
LOG = logging.getLogger(__name__)
-class FakeBackupService(BackupDriver):
+class FakeBackupService(driver.BackupDriver):
def __init__(self, context, db_driver=None):
super(FakeBackupService, self).__init__(context, db_driver)
"""Tests for db purge."""
-from datetime import datetime
-from datetime import timedelta
+import datetime
import uuid
from cinder import context
ins_stmt = self.vm.insert().values(volume_id=uuidstr)
self.conn.execute(ins_stmt)
# Set 4 of them deleted, 2 are 60 days ago, 2 are 20 days ago
- old = datetime.now() - timedelta(days=20)
- older = datetime.now() - timedelta(days=60)
+ old = datetime.datetime.now() - datetime.timedelta(days=20)
+ older = datetime.datetime.now() - datetime.timedelta(days=60)
make_old = self.volumes.update().\
where(self.volumes.c.id.in_(self.uuidstrs[1:3]))\
.values(deleted_at=old)
from cinder.i18n import _LE
from cinder.openstack.common import log as logging
-from cinder.tests.brick.fake_lvm import FakeBrickLVM
+from cinder.tests.brick import fake_lvm
from cinder.volume import driver
from cinder.volume.drivers import lvm
from cinder.zonemanager import utils as fczm_utils
def __init__(self, *args, **kwargs):
super(FakeISCSIDriver, self).__init__(execute=self.fake_execute,
*args, **kwargs)
- self.vg = FakeBrickLVM('cinder-volumes', False,
- None, 'default',
- self.fake_execute)
+ self.vg = fake_lvm.FakeBrickLVM('cinder-volumes', False,
+ None, 'default',
+ self.fake_execute)
def check_for_setup_error(self):
"""No setup necessary in fake mode."""
from oslo_config import cfg
from cinder import context
-from cinder.openstack.common.scheduler.weights import HostWeightHandler
-from cinder.scheduler.weights.capacity import AllocatedCapacityWeigher as ACW
+from cinder.openstack.common.scheduler import weights
+from cinder.scheduler.weights import capacity
from cinder import test
from cinder.tests.scheduler import fakes
from cinder.volume import utils
def setUp(self):
super(AllocatedCapacityWeigherTestCase, self).setUp()
self.host_manager = fakes.FakeHostManager()
- self.weight_handler = HostWeightHandler('cinder.scheduler.weights')
+ self.weight_handler = weights.HostWeightHandler(
+ 'cinder.scheduler.weights')
def _get_weighed_host(self, hosts, weight_properties=None):
if weight_properties is None:
weight_properties = {}
- return self.weight_handler.get_weighed_objects([ACW], hosts,
- weight_properties)[0]
+ return self.weight_handler.get_weighed_objects(
+ [capacity.AllocatedCapacityWeigher], hosts,
+ weight_properties)[0]
@mock.patch('cinder.db.sqlalchemy.api.service_get_all_by_topic')
def _get_all_hosts(self, _mock_service_get_all_by_topic, disabled=False):
from oslo_config import cfg
from cinder import context
-from cinder.openstack.common.scheduler.weights import HostWeightHandler
-from cinder.scheduler.weights.capacity import CapacityWeigher
+from cinder.openstack.common.scheduler import weights
+from cinder.scheduler.weights import capacity
from cinder import test
from cinder.tests.scheduler import fakes
from cinder.volume import utils
def setUp(self):
super(CapacityWeigherTestCase, self).setUp()
self.host_manager = fakes.FakeHostManager()
- self.weight_handler = HostWeightHandler('cinder.scheduler.weights')
+ self.weight_handler = weights.HostWeightHandler(
+ 'cinder.scheduler.weights')
def _get_weighed_host(self, hosts, weight_properties=None):
if weight_properties is None:
weight_properties = {'size': 1}
- return self.weight_handler.get_weighed_objects([CapacityWeigher],
- hosts,
- weight_properties)[0]
+ return self.weight_handler.get_weighed_objects(
+ [capacity.CapacityWeigher],
+ hosts,
+ weight_properties)[0]
@mock.patch('cinder.db.sqlalchemy.api.service_get_all_by_topic')
def _get_all_hosts(self, _mock_service_get_all_by_topic, disabled=False):
import mock
from cinder.scheduler import host_manager
-from cinder.scheduler.weights.chance import ChanceWeigher
+from cinder.scheduler.weights import chance
from cinder import test
# stub random.random() to verify the ChanceWeigher
# is using random.random() (repeated calls to weigh should
# return incrementing weights)
- weigher = ChanceWeigher()
+ weigher = chance.ChanceWeigher()
_mock_random.side_effect = self.fake_random
self.fake_random(reset=True)
host_state = {'host': 'host.example.com', 'free_capacity_gb': 99999}
hm = host_manager.HostManager()
weighers = hm._choose_host_weighers('ChanceWeigher')
self.assertEqual(1, len(weighers))
- self.assertEqual(weighers[0], ChanceWeigher)
+ self.assertEqual(weighers[0], chance.ChanceWeigher)
def test_use_of_chance_weigher_via_host_manager(self):
# ensure we don't lose any hosts when weighing with
Tests For Goodness Weigher.
"""
-from cinder.scheduler.weights.goodness import GoodnessWeigher
+from cinder.scheduler.weights import goodness
from cinder import test
from cinder.tests.scheduler import fakes
super(GoodnessWeigherTestCase, self).setUp()
def test_goodness_weigher_passing_host(self):
- weigher = GoodnessWeigher()
+ weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', {
'host': 'host.example.com',
'capabilities': {
self.assertEqual(50, weight)
def test_goodness_weigher_capabilities_substitution(self):
- weigher = GoodnessWeigher()
+ weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', {
'host': 'host.example.com',
'capabilities': {
self.assertEqual(60, weight)
def test_goodness_weigher_extra_specs_substitution(self):
- weigher = GoodnessWeigher()
+ weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', {
'host': 'host.example.com',
'capabilities': {
self.assertEqual(60, weight)
def test_goodness_weigher_volume_substitution(self):
- weigher = GoodnessWeigher()
+ weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', {
'host': 'host.example.com',
'capabilities': {
self.assertEqual(60, weight)
def test_goodness_weigher_qos_substitution(self):
- weigher = GoodnessWeigher()
+ weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', {
'host': 'host.example.com',
'capabilities': {
self.assertEqual(60, weight)
def test_goodness_weigher_stats_substitution(self):
- weigher = GoodnessWeigher()
+ weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', {
'host': 'host.example.com',
'capabilities': {
self.assertEqual(100, weight)
def test_goodness_weigher_invalid_substitution(self):
- weigher = GoodnessWeigher()
+ weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', {
'host': 'host.example.com',
'capabilities': {
self.assertEqual(0, weight)
def test_goodness_weigher_host_rating_out_of_bounds(self):
- weigher = GoodnessWeigher()
+ weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', {
'host': 'host.example.com',
'capabilities': {
self.assertEqual(0, weight)
def test_goodness_weigher_invalid_goodness_function(self):
- weigher = GoodnessWeigher()
+ weigher = goodness.GoodnessWeigher()
host_state = fakes.FakeHostState('host1', {
'host': 'host.example.com',
'capabilities': {
from cinder import context
from cinder.db.sqlalchemy import api
-from cinder.openstack.common.scheduler.weights import HostWeightHandler
-from cinder.scheduler.weights.volume_number import VolumeNumberWeigher
+from cinder.openstack.common.scheduler import weights
+from cinder.scheduler.weights import volume_number
from cinder import test
from cinder.tests.scheduler import fakes
from cinder.volume import utils
super(VolumeNumberWeigherTestCase, self).setUp()
self.context = context.get_admin_context()
self.host_manager = fakes.FakeHostManager()
- self.weight_handler = HostWeightHandler('cinder.scheduler.weights')
+ self.weight_handler = weights.HostWeightHandler(
+ 'cinder.scheduler.weights')
def _get_weighed_host(self, hosts, weight_properties=None):
if weight_properties is None:
weight_properties = {'context': self.context}
- return self.weight_handler.get_weighed_objects([VolumeNumberWeigher],
- hosts,
- weight_properties)[0]
+ return self.weight_handler.get_weighed_objects(
+ [volume_number.VolumeNumberWeigher],
+ hosts,
+ weight_properties)[0]
@mock.patch('cinder.db.sqlalchemy.api.service_get_all_by_topic')
def _get_all_hosts(self, _mock_service_get_all_by_topic, disabled=False):
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
-from cinder.tests.backup.fake_service_with_verify import\
- get_backup_driver
+from cinder.tests.backup import fake_service_with_verify as fake_service
CONF = cfg.CONF
'_map_service_to_driver') as \
mock_map_service_to_driver:
mock_map_service_to_driver.return_value = \
- get_backup_driver(self.ctxt)
+ fake_service.get_backup_driver(self.ctxt)
self.backup_mgr.reset_status(self.ctxt,
backup_id,
'available')
from oslo_config import cfg
from swiftclient import client as swift
-from cinder.backup.drivers.swift import SwiftBackupDriver
+from cinder.backup.drivers import swift as swift_dr
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder.openstack.common import log as logging
from cinder import test
-from cinder.tests.backup.fake_swift_client import FakeSwiftClient
+from cinder.tests.backup import fake_swift_client
LOG = logging.getLogger(__name__)
self.ctxt = context.get_admin_context()
self.ctxt.service_catalog = service_catalog
- self.stubs.Set(swift, 'Connection', FakeSwiftClient.Connection)
+ self.stubs.Set(swift, 'Connection',
+ fake_swift_client.FakeSwiftClient.Connection)
self.stubs.Set(hashlib, 'md5', fake_md5)
self._create_volume_db_entry()
u'adminURL': u'http://example.com'}]
}]
self.assertRaises(exception.BackupDriverException,
- SwiftBackupDriver,
+ swift_dr.SwiftBackupDriver,
self.ctxt)
def test_backup_swift_url_conf(self):
}]
self.ctxt.project_id = "12345678"
self.override_config("backup_swift_url", "http://public.example.com/")
- backup = SwiftBackupDriver(self.ctxt)
+ backup = swift_dr.SwiftBackupDriver(self.ctxt)
self.assertEqual("%s%s" % (CONF.backup_swift_url,
self.ctxt.project_id),
backup.swift_url)
def test_backup_swift_info(self):
self.override_config("swift_catalog_info", "dummy")
self.assertRaises(exception.BackupDriverException,
- SwiftBackupDriver,
+ swift_dr.SwiftBackupDriver,
self.ctxt)
def test_backup_uncompressed(self):
self._create_backup_db_entry()
self.flags(backup_compression_algorithm='none')
- service = SwiftBackupDriver(self.ctxt)
+ service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123)
service.backup(backup, self.volume_file)
def test_backup_bz2(self):
self._create_backup_db_entry()
self.flags(backup_compression_algorithm='bz2')
- service = SwiftBackupDriver(self.ctxt)
+ service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123)
service.backup(backup, self.volume_file)
def test_backup_zlib(self):
self._create_backup_db_entry()
self.flags(backup_compression_algorithm='zlib')
- service = SwiftBackupDriver(self.ctxt)
+ service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123)
service.backup(backup, self.volume_file)
def test_backup_default_container(self):
self._create_backup_db_entry(container=None)
- service = SwiftBackupDriver(self.ctxt)
+ service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123)
service.backup(backup, self.volume_file)
# the _send_progress method will be called for sure.
CONF.set_override("backup_object_number_per_notification", 1)
CONF.set_override("backup_swift_enable_progress_timer", False)
- service = SwiftBackupDriver(self.ctxt)
+ service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123)
service.backup(backup, self.volume_file)
_send_progress.reset_mock()
_send_progress_end.reset_mock()
CONF.set_override("backup_object_number_per_notification", 10)
- service = SwiftBackupDriver(self.ctxt)
+ service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123)
service.backup(backup, self.volume_file)
_send_progress_end.reset_mock()
CONF.set_override("backup_object_number_per_notification", 10)
CONF.set_override("backup_swift_enable_progress_timer", True)
- service = SwiftBackupDriver(self.ctxt)
+ service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123)
service.backup(backup, self.volume_file)
def test_backup_custom_container(self):
container_name = 'fake99'
self._create_backup_db_entry(container=container_name)
- service = SwiftBackupDriver(self.ctxt)
+ service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123)
service.backup(backup, self.volume_file)
def test_create_backup_put_object_wraps_socket_error(self):
container_name = 'socket_error_on_put'
self._create_backup_db_entry(container=container_name)
- service = SwiftBackupDriver(self.ctxt)
+ service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123)
self.assertRaises(exception.SwiftConnectionFailed,
"""
self._create_backup_db_entry()
self.flags(backup_compression_algorithm='none')
- service = SwiftBackupDriver(self.ctxt)
+ service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123)
raise exception.BackupDriverException(message=_('fake'))
# Raise a pseudo exception.BackupDriverException.
- self.stubs.Set(SwiftBackupDriver, '_backup_metadata',
+ self.stubs.Set(swift_dr.SwiftBackupDriver, '_backup_metadata',
fake_backup_metadata)
# We expect that an exception be notified directly.
"""
self._create_backup_db_entry()
self.flags(backup_compression_algorithm='none')
- service = SwiftBackupDriver(self.ctxt)
+ service = swift_dr.SwiftBackupDriver(self.ctxt)
self.volume_file.seek(0)
backup = db.backup_get(self.ctxt, 123)
raise exception.BackupDriverException(message=_('fake'))
# Raise a pseudo exception.BackupDriverException.
- self.stubs.Set(SwiftBackupDriver, '_backup_metadata',
+ self.stubs.Set(swift_dr.SwiftBackupDriver, '_backup_metadata',
fake_backup_metadata)
def fake_delete(self, backup):
raise exception.BackupOperationError()
# Raise a pseudo exception.BackupOperationError.
- self.stubs.Set(SwiftBackupDriver, 'delete', fake_delete)
+ self.stubs.Set(swift_dr.SwiftBackupDriver, 'delete', fake_delete)
# We expect that the second exception is notified.
self.assertRaises(exception.BackupOperationError,
def test_restore(self):
self._create_backup_db_entry()
- service = SwiftBackupDriver(self.ctxt)
+ service = swift_dr.SwiftBackupDriver(self.ctxt)
with tempfile.NamedTemporaryFile() as volume_file:
backup = db.backup_get(self.ctxt, 123)
def test_restore_wraps_socket_error(self):
container_name = 'socket_error_on_get'
self._create_backup_db_entry(container=container_name)
- service = SwiftBackupDriver(self.ctxt)
+ service = swift_dr.SwiftBackupDriver(self.ctxt)
with tempfile.NamedTemporaryFile() as volume_file:
backup = db.backup_get(self.ctxt, 123)
def test_restore_unsupported_version(self):
container_name = 'unsupported_version'
self._create_backup_db_entry(container=container_name)
- service = SwiftBackupDriver(self.ctxt)
+ service = swift_dr.SwiftBackupDriver(self.ctxt)
with tempfile.NamedTemporaryFile() as volume_file:
backup = db.backup_get(self.ctxt, 123)
def test_delete(self):
self._create_backup_db_entry()
- service = SwiftBackupDriver(self.ctxt)
+ service = swift_dr.SwiftBackupDriver(self.ctxt)
backup = db.backup_get(self.ctxt, 123)
service.delete(backup)
def test_delete_wraps_socket_error(self):
container_name = 'socket_error_on_delete'
self._create_backup_db_entry(container=container_name)
- service = SwiftBackupDriver(self.ctxt)
+ service = swift_dr.SwiftBackupDriver(self.ctxt)
backup = db.backup_get(self.ctxt, 123)
self.assertRaises(exception.SwiftConnectionFailed,
service.delete,
backup)
def test_get_compressor(self):
- service = SwiftBackupDriver(self.ctxt)
+ service = swift_dr.SwiftBackupDriver(self.ctxt)
compressor = service._get_compressor('None')
self.assertIsNone(compressor)
compressor = service._get_compressor('zlib')
import cinder.exception
import cinder.test
from cinder.volume import configuration as conf
-from cinder.volume.drivers.block_device import BlockDeviceDriver
+from cinder.volume.drivers import block_device
from cinder.volume import utils as volutils
self.host = 'localhost'
self.configuration.iscsi_port = 3260
self.configuration.volume_dd_blocksize = 1234
- self.drv = BlockDeviceDriver(configuration=self.configuration,
- host='localhost')
+ self.drv = block_device.BlockDeviceDriver(
+ configuration=self.configuration,
+ host='localhost')
def test_initialize_connection(self):
TEST_VOLUME1 = {'host': 'localhost1',
import mock
import testtools
-from testtools import ExpectedException
-from testtools.matchers import Contains
+from testtools import matchers
from cinder import exception
from cinder.volume import configuration as conf
-from cinder.volume.drivers.cloudbyte.cloudbyte import CloudByteISCSIDriver
+from cinder.volume.drivers.cloudbyte import cloudbyte
# A fake list account response of cloudbyte's elasticenter
FAKE_LIST_ACCOUNT_RESPONSE = """{ "listAccountResponse" : {
configuration = conf.Configuration(None, None)
# initialize the elasticenter iscsi driver
- self.driver = CloudByteISCSIDriver(configuration=configuration)
+ self.driver = cloudbyte.CloudByteISCSIDriver(
+ configuration=configuration)
# override some parts of driver configuration
self.driver.configuration.cb_tsm_name = 'openstack'
return volume_id
- @mock.patch.object(CloudByteISCSIDriver,
+ @mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'_execute_and_get_response_details')
def test_api_request_for_cloudbyte(self, mock_conn):
mock_conn.side_effect = self._side_effect_get_err_connection
# run the test
- with ExpectedException(
+ with testtools.ExpectedException(
exception.VolumeBackendAPIException,
'Bad or unexpected response from the storage volume '
'backend API: Failed to execute CloudByte API'):
mock_conn.side_effect = self._side_effect_get_err_connection2
# run the test
- with ExpectedException(
+ with testtools.ExpectedException(
exception.VolumeBackendAPIException,
'Error executing CloudByte API'):
self.driver._api_request_for_cloudbyte('listTsm', {})
- @mock.patch.object(CloudByteISCSIDriver,
+ @mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'_api_request_for_cloudbyte')
def test_delete_volume(self, mock_api_req):
# assert that no api calls were invoked
self.assertEqual(0, mock_api_req.call_count)
- @mock.patch.object(CloudByteISCSIDriver,
+ @mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'_api_request_for_cloudbyte')
def test_delete_snapshot(self, mock_api_req):
# assert that no api calls were invoked
self.assertEqual(0, mock_api_req.call_count)
- @mock.patch.object(CloudByteISCSIDriver,
+ @mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'_api_request_for_cloudbyte')
def test_create_snapshot(self, mock_api_req):
mock_api_req.side_effect = self._side_effect_api_req
# now run the test & assert the exception
- with ExpectedException(
+ with testtools.ExpectedException(
exception.VolumeBackendAPIException,
'Bad or unexpected response from the storage volume '
'backend API: Failed to create snapshot'):
# assert that no api calls were invoked
self.assertEqual(0, mock_api_req.call_count)
- @mock.patch.object(CloudByteISCSIDriver,
+ @mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'_api_request_for_cloudbyte')
def test_create_volume(self, mock_api_req):
'CustomerA', self.driver.configuration.cb_account_name)
self.assertThat(
provider_details['provider_location'],
- Contains('172.16.50.35:3260'))
+ matchers.Contains('172.16.50.35:3260'))
# assert that 9 api calls were invoked
self.assertEqual(9, mock_api_req.call_count)
mock_api_req.side_effect = self._side_effect_api_req
# now run the test & assert the exception
- with ExpectedException(
+ with testtools.ExpectedException(
exception.VolumeBackendAPIException,
"Bad or unexpected response from the storage volume "
"backend API: Volume \[NotExists\] not found in "
mock_api_req.side_effect = self._side_effect_api_req_to_create_vol
# now run the test & assert the exception
- with ExpectedException(
+ with testtools.ExpectedException(
exception.VolumeBackendAPIException,
'Bad or unexpected response from the storage volume '
'backend API: Null response received while '
mock_api_req.side_effect = self._side_effect_api_req_to_list_filesystem
# now run the test
- with ExpectedException(
+ with testtools.ExpectedException(
exception.VolumeBackendAPIException,
"Bad or unexpected response from the storage volume "
"backend API: Null response received from CloudByte's "
self._side_effect_api_req_to_list_vol_iscsi_service)
# now run the test
- with ExpectedException(
+ with testtools.ExpectedException(
exception.VolumeBackendAPIException,
"Bad or unexpected response from the storage volume "
"backend API: Null response received from CloudByte's "
self._side_effect_api_req_to_list_iscsi_initiator)
# now run the test
- with ExpectedException(
+ with testtools.ExpectedException(
exception.VolumeBackendAPIException,
"Bad or unexpected response from the storage volume "
"backend API: Null response received from CloudByte's "
"list iscsi initiators."):
self.driver.create_volume(volume)
- @mock.patch.object(CloudByteISCSIDriver,
+ @mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'_api_request_for_cloudbyte')
- @mock.patch.object(CloudByteISCSIDriver,
+ @mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'create_volume_from_snapshot')
- @mock.patch.object(CloudByteISCSIDriver,
+ @mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'create_snapshot')
def test_create_cloned_volume(self, mock_create_snapshot,
mock_create_vol_from_snap, mock_api_req):
# assert that n api calls were invoked
self.assertEqual(0, mock_api_req.call_count)
- @mock.patch.object(CloudByteISCSIDriver,
+ @mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'_api_request_for_cloudbyte')
def test_create_volume_from_snapshot(self, mock_api_req):
# assert n api calls were invoked
self.assertEqual(1, mock_api_req.call_count)
- @mock.patch.object(CloudByteISCSIDriver,
+ @mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'_api_request_for_cloudbyte')
def test_extend_volume(self, mock_api_req):
# assert n api calls were invoked
self.assertEqual(1, mock_api_req.call_count)
- @mock.patch.object(CloudByteISCSIDriver,
+ @mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'_api_request_for_cloudbyte')
def test_create_export(self, mock_api_req):
# assert the result
self.assertEqual(None, model_update['provider_auth'])
- @mock.patch.object(CloudByteISCSIDriver,
+ @mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'_api_request_for_cloudbyte')
def test_ensure_export(self, mock_api_req):
# assert the result to have a provider_auth attribute
self.assertEqual(None, model_update['provider_auth'])
- @mock.patch.object(CloudByteISCSIDriver,
+ @mock.patch.object(cloudbyte.CloudByteISCSIDriver,
'_api_request_for_cloudbyte')
def test_get_volume_stats(self, mock_api_req):
mock_api_req.side_effect = self._side_effect_api_req_to_list_tsm
# run the test with refresh as True
- with ExpectedException(
+ with testtools.ExpectedException(
exception.VolumeBackendAPIException,
"Bad or unexpected response from the storage volume "
"backend API: No response was received from CloudByte "
from cinder import context
from cinder import db
from cinder import exception
-from cinder.quota import ReservableResource
+from cinder import quota
from cinder import test
for i, resource in enumerate(('volumes', 'gigabytes')):
quotas[resource] = db.quota_create(context, project_id,
resource, i + 1)
- resources[resource] = ReservableResource(resource,
- '_sync_%s' % resource)
+ resources[resource] = quota.ReservableResource(resource,
+ '_sync_%s' % resource)
deltas[resource] = i + 1
return db.quota_reserve(
context, resources, quotas, deltas,
from cinder.volume.drivers.dell import dell_storagecenter_api
import mock
-from requests.models import Response
+from requests import models
import uuid
FLDR_PATH = 'StorageCenter/ScVolumeFolder/'
# Create a Response object that indicates OK
- response_ok = Response()
+ response_ok = models.Response()
response_ok.status_code = 200
response_ok.reason = u'ok'
RESPONSE_200 = response_ok
# Create a Response object that indicates created
- response_created = Response()
+ response_created = models.Response()
response_created.status_code = 201
response_created.reason = u'created'
RESPONSE_201 = response_created
# Create a Response object that indicates a failure (no content)
- response_nc = Response()
+ response_nc = models.Response()
response_nc.status_code = 204
response_nc.reason = u'duplicate'
RESPONSE_204 = response_nc
'''
# Create a Response object that indicates OK
- response_ok = Response()
+ response_ok = models.Response()
response_ok.status_code = 200
response_ok.reason = u'ok'
RESPONSE_200 = response_ok
# Create a Response object that indicates a failure (no content)
- response_nc = Response()
+ response_nc = models.Response()
response_nc.status_code = 204
response_nc.reason = u'duplicate'
RESPONSE_204 = response_nc
'module', ['DM_EEXIST'])
-from cinder.volume.drivers.drbdmanagedrv import DrbdManageDriver
+from cinder.volume.drivers import drbdmanagedrv
LOG = logging.getLogger(__name__)
self.stubs.Set(importutils, 'import_object',
self.fake_import_object)
- self.stubs.Set(DrbdManageDriver, 'call_or_reconnect',
+ self.stubs.Set(drbdmanagedrv.DrbdManageDriver,
+ 'call_or_reconnect',
self.fake_issue_dbus_call)
- self.stubs.Set(DrbdManageDriver, 'dbus_connect',
+ self.stubs.Set(drbdmanagedrv.DrbdManageDriver,
+ 'dbus_connect',
self.fake_issue_dbus_connect)
sys.modules['cinder.volume.drivers.drbdmanagedrv'].dm_const \
'volume_type_id': 'drbdmanage',
'created_at': timeutils.utcnow()}
- dmd = DrbdManageDriver(configuration=self.configuration)
+ dmd = drbdmanagedrv.DrbdManageDriver(configuration=self.configuration)
dmd.odm = DrbdManageFakeDriver()
dmd.create_volume(testvol)
self.assertEqual(dmd.odm.calls[0][0], "create_resource")
'volume_type_id': 'drbdmanage',
'created_at': timeutils.utcnow()}
- dmd = DrbdManageDriver(configuration=self.configuration)
+ dmd = drbdmanagedrv.DrbdManageDriver(configuration=self.configuration)
dmd.odm = DrbdManageFakeDriver()
dmd.delete_volume(testvol)
self.assertEqual(dmd.odm.calls[0][0], "list_volumes")
'volume_type_id': 'drbdmanage',
'created_at': timeutils.utcnow()}
- dmd = DrbdManageDriver(configuration=self.configuration)
+ dmd = drbdmanagedrv.DrbdManageDriver(configuration=self.configuration)
dmd.odm = DrbdManageFakeDriver()
data = dmd.local_path(testvol)
self.assertTrue(data.startswith("/dev/drbd"))
testsnap = {'id': 'ca253fd0-8068-11e4-98c0-5254008ea111',
'volume_id': 'ba253fd0-8068-11e4-98c0-5254008ea111'}
- dmd = DrbdManageDriver(configuration=self.configuration)
+ dmd = drbdmanagedrv.DrbdManageDriver(configuration=self.configuration)
dmd.odm = DrbdManageFakeDriver()
dmd.create_snapshot(testsnap)
self.assertEqual(dmd.odm.calls[0][0], "list_volumes")
def test_delete_snapshot(self):
testsnap = {'id': 'ca253fd0-8068-11e4-98c0-5254008ea111'}
- dmd = DrbdManageDriver(configuration=self.configuration)
+ dmd = drbdmanagedrv.DrbdManageDriver(configuration=self.configuration)
dmd.odm = DrbdManageFakeDriver()
dmd.delete_snapshot(testsnap)
self.assertEqual(dmd.odm.calls[0][0], "list_snapshots")
'volume_type_id': 'drbdmanage',
'created_at': timeutils.utcnow()}
- dmd = DrbdManageDriver(configuration=self.configuration)
+ dmd = drbdmanagedrv.DrbdManageDriver(configuration=self.configuration)
dmd.odm = DrbdManageFakeDriver()
dmd.extend_volume(testvol, 5)
self.assertEqual(dmd.odm.calls[0][0], "list_volumes")
newvol = {'id': 'ca253fd0-8068-11e4-98c0-5254008ea111'}
- dmd = DrbdManageDriver(configuration=self.configuration)
+ dmd = drbdmanagedrv.DrbdManageDriver(configuration=self.configuration)
dmd.odm = DrbdManageFakeDriver()
dmd.create_cloned_volume(newvol, srcvol)
self.assertEqual(dmd.odm.calls[0][0], "list_volumes")
import shutil
import tempfile
import time
-from xml.dom.minidom import Document
+from xml.dom import minidom
import mock
import six
result = None
if ResultClass == 'EMC_StorageHardwareID':
result = self._assoc_hdwid()
elif ResultClass == 'EMC_iSCSIProtocolEndpoint':
result = self._assoc_endpoint()
elif ResultClass == 'EMC_StorageVolume':
result = self._assoc_storagevolume(objectpath)
def create_fake_config_file_no_fast(self):
- doc = Document()
+ doc = minidom.Document()
emc = doc.createElement("EMC")
doc.appendChild(emc)
doc = self.add_array_info(doc, emc)
def create_fake_config_file_no_fast_with_add_ons(self):
- doc = Document()
+ doc = minidom.Document()
emc = doc.createElement("EMC")
doc.appendChild(emc)
doc = self.add_array_info(doc, emc)
def create_fake_config_file_fast(self):
- doc = Document()
+ doc = minidom.Document()
emc = doc.createElement("EMC")
doc.appendChild(emc)
def create_fake_config_file_no_fast(self):
- doc = Document()
+ doc = minidom.Document()
emc = doc.createElement("EMC")
doc.appendChild(emc)
def create_fake_config_file_fast(self):
- doc = Document()
+ doc = minidom.Document()
emc = doc.createElement("EMC")
doc.appendChild(emc)
def create_fake_config_file_fast(self):
- doc = Document()
+ doc = minidom.Document()
emc = doc.createElement("EMC")
doc.appendChild(emc)
from cinder import exception
from cinder import test
-from cinder.tests.utils import ZeroIntervalLoopingCall
+from cinder.tests import utils
from cinder.volume import configuration as conf
-from cinder.volume.drivers.emc.emc_cli_fc import EMCCLIFCDriver
-from cinder.volume.drivers.emc.emc_cli_iscsi import EMCCLIISCSIDriver
-import cinder.volume.drivers.emc.emc_vnx_cli as emc_vnx_cli
-from cinder.volume.drivers.emc.emc_vnx_cli import CommandLineHelper
-from cinder.volume.drivers.emc.emc_vnx_cli import EMCVnxCLICmdError
-from cinder.zonemanager.fc_san_lookup_service import FCSanLookupService
+from cinder.volume.drivers.emc import emc_cli_fc
+from cinder.volume.drivers.emc import emc_cli_iscsi
+from cinder.volume.drivers.emc import emc_vnx_cli
+from cinder.zonemanager import fc_san_lookup_service as fc_service
+
SUCCEED = ("", 0)
FAKE_ERROR_RETURN = ("FAKE ERROR", 255)
def setUp(self):
super(DriverTestCaseBase, self).setUp()
- self.stubs.Set(CommandLineHelper, 'command_execute',
+ self.stubs.Set(emc_vnx_cli.CommandLineHelper, 'command_execute',
self.fake_command_execute_for_driver_setup)
- self.stubs.Set(CommandLineHelper, 'get_array_serial',
+ self.stubs.Set(emc_vnx_cli.CommandLineHelper, 'get_array_serial',
mock.Mock(return_value={'array_serial':
'fakeSerial'}))
self.stubs.Set(os.path, 'exists', mock.Mock(return_value=1))
class EMCVNXCLIDriverISCSITestCase(DriverTestCaseBase):
def generateDriver(self, conf):
- return EMCCLIISCSIDriver(configuration=conf)
+ return emc_cli_iscsi.EMCCLIISCSIDriver(configuration=conf)
@mock.patch(
"eventlet.event.Event.wait",
def test_terminate_connection(self):
os.path.exists = mock.Mock(return_value=1)
- self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
+ self.driver = emc_cli_iscsi.EMCCLIISCSIDriver(
+ configuration=self.configuration)
cli_helper = self.driver.cli._client
data = {'storage_group_name': "fakehost",
'storage_group_uid': "2F:D4:00:00:00:00:00:"
results = [FAKE_ERROR_RETURN]
fake_cli = self.driverSetup(commands, results)
- self.assertRaises(EMCVnxCLICmdError,
+ self.assertRaises(exception.EMCVnxCLICmdError,
self.driver.create_volume,
self.testData.test_failed_volume)
expect_cmd = [mock.call(*self.testData.LUN_CREATION_CMD(
fake_cli = self.driverSetup(commands, results)
# case
- self.assertRaises(EMCVnxCLICmdError,
+ self.assertRaises(exception.EMCVnxCLICmdError,
self.driver.create_snapshot,
self.testData.test_failed_snapshot)
-
# verification
expect_cmd = [
mock.call(
fake_cli.assert_has_calls(expect_cmd)
@mock.patch('cinder.openstack.common.loopingcall.FixedIntervalLoopingCall',
- new=ZeroIntervalLoopingCall)
+ new=utils.ZeroIntervalLoopingCall)
def test_create_volume_from_snapshot_sync_failed(self):
cmd_dest = self.testData.LUN_PROPERTY_ALL_CMD("vol2_dest")
results = [FAKE_ERROR_RETURN]
fake_cli = self.driverSetup(commands, results)
- self.assertRaises(EMCVnxCLICmdError,
+ self.assertRaises(exception.EMCVnxCLICmdError,
self.driver.delete_volume,
self.testData.test_failed_volume)
expected = [mock.call(*self.testData.LUN_DELETE_CMD('failed_vol1'))]
results = [self.testData.LUN_DELETE_IN_SG_ERROR(),
self.testData.LUN_DELETE_IN_SG_ERROR(False)]
self.driverSetup(commands, results)
- self.assertRaises(EMCVnxCLICmdError,
+ self.assertRaises(exception.EMCVnxCLICmdError,
self.driver.delete_volume,
self.testData.test_volume1_in_sg)
- self.assertRaises(EMCVnxCLICmdError,
+ self.assertRaises(exception.EMCVnxCLICmdError,
self.driver.delete_volume,
self.testData.test_volume2_in_sg)
results = [FAKE_ERROR_RETURN]
fake_cli = self.driverSetup(commands, results)
- self.assertRaises(EMCVnxCLICmdError,
+ self.assertRaises(exception.EMCVnxCLICmdError,
self.driver.extend_volume,
self.testData.test_failed_volume,
2)
fake_cli.assert_has_calls(expected)
@mock.patch('cinder.openstack.common.loopingcall.FixedIntervalLoopingCall',
- new=ZeroIntervalLoopingCall)
+ new=utils.ZeroIntervalLoopingCall)
def test_extend_volume_failed(self):
commands = [self.testData.LUN_PROPERTY_ALL_CMD('failed_vol1')]
results = [self.testData.LUN_PROPERTY('failed_vol1', size=2)]
results = [SUCCEED]
self.configuration.storage_vnx_pool_name = \
self.testData.test_pool_name
- self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
+ self.driver = emc_cli_iscsi.EMCCLIISCSIDriver(
+ configuration=self.configuration)
assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliPool)
# mock the command executor
fake_command_execute = self.get_command_execute_simulator(
results = [self.testData.LUN_PROPERTY('lun_name')]
invalid_pool_name = "fake_pool"
self.configuration.storage_vnx_pool_name = invalid_pool_name
- self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
+ self.driver = emc_cli_iscsi.EMCCLIISCSIDriver(
+ configuration=self.configuration)
assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliPool)
# mock the command executor
fake_command_execute = self.get_command_execute_simulator(
self.configuration.storage_vnx_pool_name = \
self.testData.test_pool_name
- self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
+ self.driver = emc_cli_iscsi.EMCCLIISCSIDriver(
+ configuration=self.configuration)
assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliPool)
# mock the command executor
'-Deduplication',
'-ThinProvisioning',
'-FAST']
- CommandLineHelper.get_array_serial = mock.Mock(
+ emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
self.driver.retype(None, self.testData.test_volume3,
'-Deduplication',
'-ThinProvisioning',
'-FAST']
- CommandLineHelper.get_array_serial = mock.Mock(
+ emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
self.driver.retype(None, self.testData.test_volume3,
'-Deduplication',
'-ThinProvisioning',
'-FAST']
- CommandLineHelper.get_array_serial = mock.Mock(
+ emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
self.driver.retype(None, self.testData.test_volume3,
'-Deduplication',
'-ThinProvisioning',
'-FAST']
- CommandLineHelper.get_array_serial = mock.Mock(
+ emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
self.driver.retype(None, self.testData.test_volume3,
'-Deduplication',
'-ThinProvisioning',
'-FAST']
- CommandLineHelper.get_array_serial = mock.Mock(
+ emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
retyped = self.driver.retype(None, self.testData.test_volume3,
'-Deduplication',
'-ThinProvisioning',
'-FAST']
- CommandLineHelper.get_array_serial = mock.Mock(
+ emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
self.driver.retype(None, self.testData.test_volume3,
'-Deduplication',
'-ThinProvisioning',
'-FAST']
- CommandLineHelper.get_array_serial = mock.Mock(
+ emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
retyped = self.driver.retype(None, self.testData.test_volume3,
'-Deduplication',
'-ThinProvisioning',
'-FAST']
- CommandLineHelper.get_array_serial = mock.Mock(
+ emc_vnx_cli.CommandLineHelper.get_array_serial = mock.Mock(
return_value={'array_serial': "FNM00124500890"})
self.driver.retype(None, self.testData.test_volume3,
self.configuration.storage_vnx_pool_name = \
self.testData.test_pool_name
- self.driver = EMCCLIISCSIDriver(configuration=self.configuration)
+ self.driver = emc_cli_iscsi.EMCCLIISCSIDriver(
+ configuration=self.configuration)
assert isinstance(self.driver.cli, emc_vnx_cli.EMCVnxCliPool)
cli_helper = self.driver.cli._client
'volume_backend_name': 'namedbackend'})
def generateDriver(self, conf):
- driver = EMCCLIISCSIDriver(configuration=conf)
+ driver = emc_cli_iscsi.EMCCLIISCSIDriver(configuration=conf)
self.assertTrue(isinstance(driver.cli,
emc_vnx_cli.EMCVnxCliArray))
return driver
class EMCVNXCLIDriverFCTestCase(DriverTestCaseBase):
def generateDriver(self, conf):
- return EMCCLIFCDriver(configuration=conf)
+ return emc_cli_fc.EMCCLIFCDriver(configuration=conf)
@mock.patch(
"oslo_concurrency.processutils.execute",
('', 0),
self.testData.FC_PORTS]
fake_cli = self.driverSetup(commands, results)
- self.driver.cli.zonemanager_lookup_service = FCSanLookupService(
- configuration=self.configuration)
+ self.driver.cli.zonemanager_lookup_service = \
+ fc_service.FCSanLookupService(configuration=self.configuration)
conn_info = self.driver.initialize_connection(
self.testData.test_volume,
"get_device_mapping_from_network",
mock.Mock(return_value=EMCVNXCLIDriverTestData.device_map))
def test_terminate_connection_remove_zone_false(self):
- self.driver = EMCCLIFCDriver(configuration=self.configuration)
+ self.driver = emc_cli_fc.EMCCLIFCDriver(
+ configuration=self.configuration)
cli_helper = self.driver.cli._client
data = {'storage_group_name': "fakehost",
'storage_group_uid': "2F:D4:00:00:00:00:00:"
cli_helper.get_storage_group = mock.Mock(
return_value=data)
cli_helper.remove_hlu_from_storagegroup = mock.Mock()
- self.driver.cli.zonemanager_lookup_service = FCSanLookupService(
- configuration=self.configuration)
+ self.driver.cli.zonemanager_lookup_service = \
+ fc_service.FCSanLookupService(configuration=self.configuration)
connection_info = self.driver.terminate_connection(
self.testData.test_volume,
self.testData.connector)
"get_device_mapping_from_network",
mock.Mock(return_value=EMCVNXCLIDriverTestData.device_map))
def test_terminate_connection_remove_zone_true(self):
- self.driver = EMCCLIFCDriver(configuration=self.configuration)
+ self.driver = emc_cli_fc.EMCCLIFCDriver(
+ configuration=self.configuration)
cli_helper = self.driver.cli._client
data = {'storage_group_name': "fakehost",
'storage_group_uid': "2F:D4:00:00:00:00:00:"
cli_helper.get_storage_group = mock.Mock(
return_value=data)
cli_helper.remove_hlu_from_storagegroup = mock.Mock()
- self.driver.cli.zonemanager_lookup_service = FCSanLookupService(
- configuration=self.configuration)
+ self.driver.cli.zonemanager_lookup_service = \
+ fc_service.FCSanLookupService(configuration=self.configuration)
connection_info = self.driver.terminate_connection(
self.testData.test_volume,
self.testData.connector)
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
-from cinder.volume.drivers.emc.xtremio import XtremIOFibreChannelDriver
-from cinder.volume.drivers.emc.xtremio import XtremIOISCSIDriver
+from cinder.volume.drivers.emc import xtremio
LOG = logging.getLogger(__name__)
configuration.san_login = ''
configuration.san_password = ''
configuration.san_ip = ''
- self.driver = XtremIOISCSIDriver(configuration=configuration)
+ self.driver = xtremio.XtremIOISCSIDriver(configuration=configuration)
self.data = CommonData()
configuration.san_login = ''
configuration.san_password = ''
configuration.san_ip = ''
- self.driver = XtremIOFibreChannelDriver(configuration=configuration)
+ self.driver = xtremio.XtremIOFibreChannelDriver(
+ configuration=configuration)
self.data = CommonData()
# under the License.
from cinder import exception
-from cinder.scheduler.evaluator.evaluator import evaluate
+from cinder.scheduler.evaluator import evaluator
from cinder import test
class EvaluatorTestCase(test.TestCase):
def test_simple_integer(self):
- self.assertEqual(2, evaluate("1+1"))
- self.assertEqual(9, evaluate("2+3+4"))
- self.assertEqual(23, evaluate("11+12"))
- self.assertEqual(30, evaluate("5*6"))
- self.assertEqual(2, evaluate("22/11"))
- self.assertEqual(38, evaluate("109-71"))
- self.assertEqual(493, evaluate("872 - 453 + 44 / 22 * 4 + 66"))
+ self.assertEqual(2, evaluator.evaluate("1+1"))
+ self.assertEqual(9, evaluator.evaluate("2+3+4"))
+ self.assertEqual(23, evaluator.evaluate("11+12"))
+ self.assertEqual(30, evaluator.evaluate("5*6"))
+ self.assertEqual(2, evaluator.evaluate("22/11"))
+ self.assertEqual(38, evaluator.evaluate("109-71"))
+ self.assertEqual(
+ 493, evaluator.evaluate("872 - 453 + 44 / 22 * 4 + 66"))
def test_simple_float(self):
- self.assertEqual(2.0, evaluate("1.0 + 1.0"))
- self.assertEqual(2.5, evaluate("1.5 + 1.0"))
- self.assertEqual(3.0, evaluate("1.5 * 2.0"))
+ self.assertEqual(2.0, evaluator.evaluate("1.0 + 1.0"))
+ self.assertEqual(2.5, evaluator.evaluate("1.5 + 1.0"))
+ self.assertEqual(3.0, evaluator.evaluate("1.5 * 2.0"))
def test_int_float_mix(self):
- self.assertEqual(2.5, evaluate("1.5 + 1"))
- self.assertEqual(4.25, evaluate("8.5 / 2"))
- self.assertEqual(5.25, evaluate("10/4+0.75 + 2"))
+ self.assertEqual(2.5, evaluator.evaluate("1.5 + 1"))
+ self.assertEqual(4.25, evaluator.evaluate("8.5 / 2"))
+ self.assertEqual(5.25, evaluator.evaluate("10/4+0.75 + 2"))
def test_negative_numbers(self):
- self.assertEqual(-2, evaluate("-2"))
- self.assertEqual(-1, evaluate("-2+1"))
- self.assertEqual(3, evaluate("5+-2"))
+ self.assertEqual(-2, evaluator.evaluate("-2"))
+ self.assertEqual(-1, evaluator.evaluate("-2+1"))
+ self.assertEqual(3, evaluator.evaluate("5+-2"))
def test_exponent(self):
- self.assertEqual(8, evaluate("2^3"))
- self.assertEqual(-8, evaluate("-2 ^ 3"))
- self.assertEqual(15.625, evaluate("2.5 ^ 3"))
- self.assertEqual(8, evaluate("4 ^ 1.5"))
+ self.assertEqual(8, evaluator.evaluate("2^3"))
+ self.assertEqual(-8, evaluator.evaluate("-2 ^ 3"))
+ self.assertEqual(15.625, evaluator.evaluate("2.5 ^ 3"))
+ self.assertEqual(8, evaluator.evaluate("4 ^ 1.5"))
def test_function(self):
- self.assertEqual(5, evaluate("abs(-5)"))
- self.assertEqual(2, evaluate("abs(2)"))
- self.assertEqual(1, evaluate("min(1, 100)"))
- self.assertEqual(100, evaluate("max(1, 100)"))
+ self.assertEqual(5, evaluator.evaluate("abs(-5)"))
+ self.assertEqual(2, evaluator.evaluate("abs(2)"))
+ self.assertEqual(1, evaluator.evaluate("min(1, 100)"))
+ self.assertEqual(100, evaluator.evaluate("max(1, 100)"))
def test_parentheses(self):
- self.assertEqual(1, evaluate("(1)"))
- self.assertEqual(-1, evaluate("(-1)"))
- self.assertEqual(2, evaluate("(1+1)"))
- self.assertEqual(15, evaluate("(1+2) * 5"))
- self.assertEqual(3, evaluate("(1+2)*(3-1)/((1+(2-1)))"))
- self.assertEqual(-8.0, evaluate("((1.0 / 0.5) * (2)) *(-2)"))
+ self.assertEqual(1, evaluator.evaluate("(1)"))
+ self.assertEqual(-1, evaluator.evaluate("(-1)"))
+ self.assertEqual(2, evaluator.evaluate("(1+1)"))
+ self.assertEqual(15, evaluator.evaluate("(1+2) * 5"))
+ self.assertEqual(3, evaluator.evaluate("(1+2)*(3-1)/((1+(2-1)))"))
+ self.assertEqual(
+ -8.0, evaluator.evaluate("((1.0 / 0.5) * (2)) *(-2)"))
def test_comparisons(self):
- self.assertEqual(True, evaluate("1 < 2"))
- self.assertEqual(True, evaluate("2 > 1"))
- self.assertEqual(True, evaluate("2 != 1"))
- self.assertEqual(False, evaluate("1 > 2"))
- self.assertEqual(False, evaluate("2 < 1"))
- self.assertEqual(False, evaluate("2 == 1"))
- self.assertEqual(True, evaluate("(1 == 1) == !(1 == 2)"))
+ self.assertEqual(True, evaluator.evaluate("1 < 2"))
+ self.assertEqual(True, evaluator.evaluate("2 > 1"))
+ self.assertEqual(True, evaluator.evaluate("2 != 1"))
+ self.assertEqual(False, evaluator.evaluate("1 > 2"))
+ self.assertEqual(False, evaluator.evaluate("2 < 1"))
+ self.assertEqual(False, evaluator.evaluate("2 == 1"))
+ self.assertEqual(True, evaluator.evaluate("(1 == 1) == !(1 == 2)"))
def test_logic_ops(self):
- self.assertEqual(True, evaluate("(1 == 1) AND (2 == 2)"))
- self.assertEqual(True, evaluate("(1 == 1) and (2 == 2)"))
- self.assertEqual(True, evaluate("(1 == 1) && (2 == 2)"))
- self.assertEqual(False, evaluate("(1 == 1) && (5 == 2)"))
-
- self.assertEqual(True, evaluate("(1 == 1) OR (5 == 2)"))
- self.assertEqual(True, evaluate("(1 == 1) or (5 == 2)"))
- self.assertEqual(True, evaluate("(1 == 1) || (5 == 2)"))
- self.assertEqual(False, evaluate("(5 == 1) || (5 == 2)"))
-
- self.assertEqual(False, evaluate("(1 == 1) AND NOT (2 == 2)"))
- self.assertEqual(False, evaluate("(1 == 1) AND not (2 == 2)"))
- self.assertEqual(False, evaluate("(1 == 1) AND !(2 == 2)"))
- self.assertEqual(True, evaluate("(1 == 1) AND NOT (5 == 2)"))
- self.assertEqual(True,
- evaluate("(1 == 1) OR NOT (2 == 2) AND (5 == 5)"))
+ self.assertEqual(True, evaluator.evaluate("(1 == 1) AND (2 == 2)"))
+ self.assertEqual(True, evaluator.evaluate("(1 == 1) and (2 == 2)"))
+ self.assertEqual(True, evaluator.evaluate("(1 == 1) && (2 == 2)"))
+ self.assertEqual(False, evaluator.evaluate("(1 == 1) && (5 == 2)"))
+
+ self.assertEqual(True, evaluator.evaluate("(1 == 1) OR (5 == 2)"))
+ self.assertEqual(True, evaluator.evaluate("(1 == 1) or (5 == 2)"))
+ self.assertEqual(True, evaluator.evaluate("(1 == 1) || (5 == 2)"))
+ self.assertEqual(False, evaluator.evaluate("(5 == 1) || (5 == 2)"))
+
+ self.assertEqual(
+ False, evaluator.evaluate("(1 == 1) AND NOT (2 == 2)"))
+ self.assertEqual(
+ False, evaluator.evaluate("(1 == 1) AND not (2 == 2)"))
+ self.assertEqual(
+ False, evaluator.evaluate("(1 == 1) AND !(2 == 2)"))
+ self.assertEqual(
+ True, evaluator.evaluate("(1 == 1) AND NOT (5 == 2)"))
+ self.assertEqual(
+ True, evaluator.evaluate("(1 == 1) OR NOT (2 == 2) AND (5 == 5)"))
def test_ternary_conditional(self):
- self.assertEqual(5, evaluate("(1 < 2) ? 5 : 10"))
- self.assertEqual(10, evaluate("(1 > 2) ? 5 : 10"))
+ self.assertEqual(5, evaluator.evaluate("(1 < 2) ? 5 : 10"))
+ self.assertEqual(10, evaluator.evaluate("(1 > 2) ? 5 : 10"))
def test_variables_dict(self):
stats = {'iops': 1000, 'usage': 0.65, 'count': 503, 'free_space': 407}
request = {'iops': 500, 'size': 4}
- self.assertEqual(1500, evaluate("stats.iops + request.iops",
- stats=stats, request=request))
+ self.assertEqual(1500, evaluator.evaluate("stats.iops + request.iops",
+ stats=stats,
+ request=request))
def test_missing_var(self):
stats = {'iops': 1000, 'usage': 0.65, 'count': 503, 'free_space': 407}
request = {'iops': 500, 'size': 4}
self.assertRaises(exception.EvaluatorParseException,
- evaluate,
+ evaluator.evaluate,
"foo.bob + 5",
stats=stats, request=request)
self.assertRaises(exception.EvaluatorParseException,
- evaluate,
+ evaluator.evaluate,
"stats.bob + 5",
stats=stats, request=request)
self.assertRaises(exception.EvaluatorParseException,
- evaluate,
+ evaluator.evaluate,
"fake.var + 1",
stats=stats, request=request, fake=None)
def test_bad_expression(self):
self.assertRaises(exception.EvaluatorParseException,
- evaluate,
+ evaluator.evaluate,
"1/*1")
def test_nonnumber_comparison(self):
request = {'test': 'bar'}
self.assertRaises(
exception.EvaluatorParseException,
- evaluate,
+ evaluator.evaluate,
"nonnumber.test != request.test",
nonnumber=nonnumber, request=request)
def test_div_zero(self):
self.assertRaises(exception.EvaluatorParseException,
- evaluate,
+ evaluator.evaluate,
"7 / 0")
# self.stubs.Set(self.driver.configuration, 'safe_get',
# self.fake_configuration_safe_get)
- self.stubs.Set(eternus_dx_iscsi.FJDXISCSIDriver, '_do_iscsi_discovery',
+ self.stubs.Set(eternus_dx_iscsi.FJDXISCSIDriver,
+ '_do_iscsi_discovery',
self.fake_do_iscsi_discovery)
- self.stubs.Set(eternus_dx_common.FJDXCommon, '_get_ecom_connection',
+ self.stubs.Set(eternus_dx_common.FJDXCommon,
+ '_get_ecom_connection',
self.fake_ecom_connection)
instancename = FakeCIMInstanceName()
- self.stubs.Set(eternus_dx_common.FJDXCommon, '_getinstancename',
+ self.stubs.Set(eternus_dx_common.FJDXCommon,
+ '_getinstancename',
instancename.fake_getinstancename)
# set iscsi driver to self.driver
import requests
from cinder import context
-from cinder.db.sqlalchemy.models import VolumeMetadata
+from cinder.db.sqlalchemy import models
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
-from cinder.volume.drivers.fusionio.ioControl import FIOconnection
-from cinder.volume.drivers.fusionio.ioControl import FIOioControlDriver
+from cinder.volume.drivers.fusionio import ioControl
from cinder.volume import qos_specs
from cinder.volume import volume_types
super(FIOioControlConnectionTests, self).setUp()
self.configuration = create_configuration()
self.ctxt = context.get_admin_context()
- return_text = json.dumps({"Version": FIOconnection.APIVERSION})
+ return_text = json.dumps(
+ {"Version": ioControl.FIOconnection.APIVERSION})
get_return = FIOFakeResponse(code=200,
text=return_text)
requests.get = mock.Mock(return_value=get_return)
- self.conn = FIOconnection(self.configuration.san_ip,
- self.configuration.san_login,
- self.configuration.san_password,
- self.configuration.fusionio_iocontrol_retry,
- (self.configuration.
- fusionio_iocontrol_verify_cert),)
+ self.conn = ioControl.FIOconnection(
+ self.configuration.san_ip,
+ self.configuration.san_login,
+ self.configuration.san_password,
+ self.configuration.fusionio_iocontrol_retry,
+ (self.configuration.
+ fusionio_iocontrol_verify_cert),)
def test_conn_init_sucess(self):
expected = [mock.call(url=("https://" +
requests.get.assert_has_calls(expected)
def test_wrong_version(self):
- expected = json.dumps({"Version": (FIOconnection.APIVERSION + ".1")})
+ expected = json.dumps(
+ {"Version": (ioControl.FIOconnection.APIVERSION + ".1")})
get_return = FIOFakeResponse(code=200,
text=expected)
requests.get = mock.Mock(return_value=get_return)
self.assertRaises(exception.VolumeDriverException,
- FIOconnection,
+ ioControl.FIOconnection,
self.configuration.san_ip,
self.configuration.san_login,
self.configuration.san_password,
super(FIOioControlTestCases, self).setUp()
self.configuration = create_configuration()
self.ctxt = context.get_admin_context()
- self.drv = FIOioControlDriver(configuration=self.configuration)
+ self.drv = ioControl.FIOioControlDriver(
+ configuration=self.configuration)
self.drv.fio_qos_dict = self.policyTable
def test_do_setup_sucess(self, connmock):
"Driver/Test version Mismatch")
def test_create_volume_QoS_by_presets(self, connmock):
- preset_qos = VolumeMetadata(key='fio-qos', value='Policy 2')
+ preset_qos = models.VolumeMetadata(key='fio-qos', value='Policy 2')
testvol = {'project_id': 'testprjid',
'name': 'testvol',
'size': 1,
# under the License.
import os
-import shutil
import tempfile
import mock
from cinder import context
from cinder import exception
-from cinder.image import image_utils
from cinder.openstack.common import log as logging
from cinder import test
from cinder import utils
from cinder.volume.drivers.ibm import gpfs
from cinder.volume import volume_types
-from mock import patch
LOG = logging.getLogger(__name__)
def test_sizestr(self):
self.assertEqual(gpfs._sizestr('10'), '10G')
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_get_gpfs_state_ok(self, mock_exec):
mock_exec.return_value = ('mmgetstate::HEADER:version:reserved:'
'reserved:nodeName:nodeNumber:state:quorum:'
self.assertEqual(True, self.driver._get_gpfs_state().splitlines()[1].
startswith('mmgetstate::0:1:::devstack'))
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_get_gpfs_state_fail_mmgetstate(self, mock_exec):
mock_exec.side_effect = processutils.ProcessExecutionError(
stdout='test', stderr='test')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver._get_gpfs_state)
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._get_gpfs_state')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._get_gpfs_state')
def test_check_gpfs_state_ok(self, mock_get_gpfs_state):
mock_get_gpfs_state.return_value = ('mmgetstate::HEADER:version:'
'reserved:reserved:nodeName:'
'quorum node:(undefined):')
self.driver._check_gpfs_state()
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._get_gpfs_state')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._get_gpfs_state')
def test_check_gpfs_state_fail_not_active(self, mock_get_gpfs_state):
mock_get_gpfs_state.return_value = ('mmgetstate::HEADER:version:'
'reserved:reserved:nodeName:'
self.assertRaises(exception.VolumeBackendAPIException,
self.driver._check_gpfs_state)
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_get_fs_from_path_ok(self, mock_exec):
mock_exec.return_value = ('Filesystem 1K-blocks '
'Used Available Use%% Mounted on\n'
self.assertEqual(self.driver._gpfs_device,
self.driver._get_filesystem_from_path('/gpfs0'))
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_get_fs_from_path_fail_path(self, mock_exec):
mock_exec.return_value = ('Filesystem 1K-blocks '
'Used Available Use% Mounted on\n'
self.assertNotEqual(self.driver._gpfs_device,
self.driver._get_filesystem_from_path('/gpfs0'))
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_get_fs_from_path_fail_raise(self, mock_exec):
mock_exec.side_effect = processutils.ProcessExecutionError(
stdout='test', stderr='test')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver._get_filesystem_from_path, '/gpfs0')
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_get_gpfs_cluster_id_ok(self, mock_exec):
mock_exec.return_value = ('mmlsconfig::HEADER:version:reserved:'
'reserved:configParameter:value:nodeList:\n'
self.assertEqual(self.driver._cluster_id,
self.driver._get_gpfs_cluster_id())
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_get_gpfs_cluster_id_fail_id(self, mock_exec):
mock_exec.return_value = ('mmlsconfig::HEADER.:version:reserved:'
'reserved:configParameter:value:nodeList:\n'
self.assertNotEqual(self.driver._cluster_id,
self.driver._get_gpfs_cluster_id())
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_get_gpfs_cluster_id_fail_raise(self, mock_exec):
mock_exec.side_effect = processutils.ProcessExecutionError(
stdout='test', stderr='test')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver._get_gpfs_cluster_id)
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_get_fileset_from_path_ok(self, mock_exec):
mock_exec.return_value = ('file name: /gpfs0\n'
'metadata replication: 1 max 2\n'
'Windows attributes: DIRECTORY', '')
self.driver._get_fileset_from_path('')
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_get_fileset_from_path_fail_mmlsattr(self, mock_exec):
mock_exec.side_effect = processutils.ProcessExecutionError(
stdout='test', stderr='test')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver._get_fileset_from_path, '')
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_get_fileset_from_path_fail_find_fileset(self, mock_exec):
mock_exec.return_value = ('file name: /gpfs0\n'
'metadata replication: 1 max 2\n'
self.assertRaises(exception.VolumeBackendAPIException,
self.driver._get_fileset_from_path, '')
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_verify_gpfs_pool_ok(self, mock_exec):
mock_exec.return_value = ('Storage pools in file system at \'/gpfs0\':'
'\n'
self.assertTrue(self.driver._gpfs_device,
self.driver._verify_gpfs_pool('/dev/gpfs'))
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_verify_gpfs_pool_fail_pool(self, mock_exec):
mock_exec.return_value = ('Storage pools in file system at \'/gpfs0\':'
'\n'
self.assertTrue(self.driver._gpfs_device,
self.driver._verify_gpfs_pool('/dev/gpfs'))
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_verify_gpfs_pool_fail_raise(self, mock_exec):
mock_exec.side_effect = processutils.ProcessExecutionError(
stdout='test', stderr='test')
self.assertFalse(self.driver._verify_gpfs_pool('/dev/gpfs'))
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
+ @mock.patch('cinder.utils.execute')
def test_update_volume_storage_pool_ok(self, mock_exec, mock_verify_pool):
mock_verify_pool.return_value = True
self.assertTrue(self.driver._update_volume_storage_pool('', 'system'))
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
+ @mock.patch('cinder.utils.execute')
def test_update_volume_storage_pool_ok_pool_none(self,
mock_exec,
mock_verify_pool):
mock_verify_pool.return_value = True
self.assertTrue(self.driver._update_volume_storage_pool('', None))
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
+ @mock.patch('cinder.utils.execute')
def test_update_volume_storage_pool_fail_pool(self,
mock_exec,
mock_verify_pool):
'',
'system')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
+ @mock.patch('cinder.utils.execute')
def test_update_volume_storage_pool_fail_mmchattr(self,
mock_exec,
mock_verify_pool):
mock_verify_pool.return_value = True
self.assertFalse(self.driver._update_volume_storage_pool('', 'system'))
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
- '_get_filesystem_from_path')
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_get_filesystem_from_path')
+ @mock.patch('cinder.utils.execute')
def test_get_gpfs_fs_release_level_ok(self,
mock_exec,
mock_fs_from_path):
self.assertEqual(('/dev/gpfs', 1403),
self.driver._get_gpfs_fs_release_level(''))
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
- '_get_filesystem_from_path')
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_get_filesystem_from_path')
+ @mock.patch('cinder.utils.execute')
def test_get_gpfs_fs_release_level_fail_mmlsfs(self,
mock_exec,
mock_fs_from_path):
self.assertRaises(exception.VolumeBackendAPIException,
self.driver._get_gpfs_fs_release_level, '')
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_get_gpfs_cluster_release_level_ok(self, mock_exec):
mock_exec.return_value = ('mmlsconfig::HEADER:version:reserved:'
'reserved:configParameter:value:nodeList:\n'
'')
self.assertEqual(1403, self.driver._get_gpfs_cluster_release_level())
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_get_gpfs_cluster_release_level_fail_mmlsconfig(self, mock_exec):
mock_exec.side_effect = processutils.ProcessExecutionError(
stdout='test', stderr='test')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver._get_gpfs_cluster_release_level)
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_is_gpfs_path_fail_mmlsattr(self, mock_exec):
mock_exec.side_effect = processutils.ProcessExecutionError(
stdout='test', stderr='test')
self.assertRaises(exception.VolumeBackendAPIException,
self.driver._is_gpfs_path, '/dummy/path')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._get_fileset_from_path')
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_get_fileset_from_path')
+ @mock.patch('cinder.utils.execute')
def test_is_same_fileset_ok(self,
mock_exec,
mock_get_fileset_from_path):
mock_get_fileset_from_path.side_effect = [True, False]
self.assertFalse(self.driver._is_same_fileset('', ''))
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._get_available_capacity')
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_get_available_capacity')
+ @mock.patch('cinder.utils.execute')
def test_same_cluster_ok(self, mock_exec, mock_avail_capacity):
mock_avail_capacity.return_value = (10192683008, 10737418240)
stats = self.driver.get_volume_stats()
host = {'host': 'foo', 'capabilities': cap}
self.assertFalse(self.driver._same_cluster(host))
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_set_rw_permission(self, mock_exec):
self.driver._set_rw_permission('')
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_can_migrate_locally(self, mock_exec):
host = {'host': 'foo', 'capabilities': ''}
self.assertEqual(self.driver._can_migrate_locally(host), None)
host = {'host': 'foo', 'capabilities': cap}
self.assertEqual(self.driver._can_migrate_locally(host), 'testpath')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
- '_get_filesystem_from_path')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._get_gpfs_cluster_id')
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_get_filesystem_from_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_get_gpfs_cluster_id')
+ @mock.patch('cinder.utils.execute')
def test_do_setup_ok(self,
mock_exec,
mock_get_gpfs_cluster_id,
mock_verify_gpfs_pool.return_value = True
self.driver.do_setup(ctxt)
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
- '_get_filesystem_from_path')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._get_gpfs_cluster_id')
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_get_filesystem_from_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_get_gpfs_cluster_id')
+ @mock.patch('cinder.utils.execute')
def test_do_setup_fail_get_cluster_id(self,
mock_exec,
mock_get_gpfs_cluster_id,
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.do_setup, ctxt)
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
- '_get_filesystem_from_path')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._get_gpfs_cluster_id')
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_get_filesystem_from_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_get_gpfs_cluster_id')
+ @mock.patch('cinder.utils.execute')
def test_do_setup_fail_get_fs_from_path(self,
mock_exec,
mock_get_gpfs_cluster_id,
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.do_setup, ctxt)
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
- '_get_filesystem_from_path')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._get_gpfs_cluster_id')
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_pool')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_get_filesystem_from_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_get_gpfs_cluster_id')
+ @mock.patch('cinder.utils.execute')
def test_do_setup_fail_volume(self,
mock_exec,
mock_get_gpfs_cluster_id,
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.do_setup, ctxt)
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._check_gpfs_state')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
- '_get_gpfs_fs_release_level')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._check_gpfs_state')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_get_gpfs_fs_release_level')
def test_check_for_setup_error_fail_conf(self,
mock_get_gpfs_fs_rel_lev,
mock_is_gpfs_path,
CONF.gpfs_images_dir = org_value_dir
fake_fs_release = org_fake_fs_release
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_create_sparse_file(self, mock_exec):
self.driver._create_sparse_file('', 100)
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_allocate_file_blocks(self, mock_exec):
self.driver._allocate_file_blocks(os.path.join(self.images_dir,
'test'), 1)
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_gpfs_change_attributes(self, mock_exec):
options = []
options.extend(['-T', 'test'])
self.driver._gpfs_change_attributes(options, self.images_dir)
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._mkfs')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_change_attributes')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._mkfs')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_gpfs_change_attributes')
def test_set_volume_attributes(self, mock_change_attributes, mock_mkfs):
metadata = [dict([('key', 'data_pool_name'), ('value', 'test')]),
dict([('key', 'replicas'), ('value', 'test')]),
self.driver._set_volume_attributes('', '', metadata)
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_change_attributes')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_gpfs_change_attributes')
def test_set_volume_attributes_no_attributes(self, mock_change_attributes):
metadata = []
org_value = self.driver.configuration.gpfs_storage_pool
self.flags(volume_driver=self.driver_name,
gpfs_storage_pool=org_value)
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_change_attributes')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_gpfs_change_attributes')
def test_set_volume_attributes_no_options(self, mock_change_attributes):
metadata = []
org_value = self.driver.configuration.gpfs_storage_pool
self.flags(volume_driver=self.driver_name,
gpfs_storage_pool=org_value)
- @patch('cinder.utils.execute')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._allocate_file_blocks')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._set_volume_attributes')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._set_rw_permission')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_sparse_file')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_path_state')
+ @mock.patch('cinder.utils.execute')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_allocate_file_blocks')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_set_volume_attributes')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_set_rw_permission')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_create_sparse_file')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_verify_gpfs_path_state')
def test_create_volume(self,
mock_gpfs_path_state,
mock_local_path,
self.flags(volume_driver=self.driver_name,
gpfs_sparse_volumes=org_value)
- @patch('cinder.utils.execute')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._allocate_file_blocks')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._set_volume_attributes')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._set_rw_permission')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_sparse_file')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_path_state')
+ @mock.patch('cinder.utils.execute')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_allocate_file_blocks')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_set_volume_attributes')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_set_rw_permission')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_create_sparse_file')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_verify_gpfs_path_state')
def test_create_volume_no_sparse_volume(self,
mock_gpfs_path_state,
mock_local_path,
self.flags(volume_driver=self.driver_name,
gpfs_sparse_volumes=org_value)
- @patch('cinder.utils.execute')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._allocate_file_blocks')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._set_volume_attributes')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._set_rw_permission')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_sparse_file')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_path_state')
+ @mock.patch('cinder.utils.execute')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_allocate_file_blocks')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_set_volume_attributes')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_set_rw_permission')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_create_sparse_file')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_verify_gpfs_path_state')
def test_create_volume_with_metadata(self,
mock_gpfs_path_state,
mock_local_path,
self.flags(volume_driver=self.driver_name,
gpfs_sparse_volumes=org_value)
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._resize_volume_file')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._set_volume_attributes')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_redirect')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._set_rw_permission')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_full_copy')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._get_snapshot_path')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_resize_volume_file')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_set_volume_attributes')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_redirect')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_set_rw_permission')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_full_copy')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.'
+ 'GPFSDriver._get_snapshot_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
def test_create_volume_from_snapshot(self,
mock_local_path,
mock_snapshot_path,
),
{'size': 5.0})
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._resize_volume_file')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._set_volume_attributes')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_redirect')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._set_rw_permission')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_full_copy')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._get_snapshot_path')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_resize_volume_file')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_set_volume_attributes')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_redirect')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_set_rw_permission')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_full_copy')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_get_snapshot_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
def test_create_volume_from_snapshot_metadata(self,
mock_local_path,
mock_snapshot_path,
snapshot),
{'size': 5.0})
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._resize_volume_file')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._set_volume_attributes')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._set_rw_permission')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_clone')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_full_copy')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_resize_volume_file')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_set_volume_attributes')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_set_rw_permission')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_create_gpfs_clone')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_full_copy')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
def test_create_cloned_volume(self,
mock_local_path,
mock_gpfs_full_copy,
self.assertEqual(self.driver.create_cloned_volume(volume, src_volume),
{'size': 5.0})
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._resize_volume_file')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._set_volume_attributes')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._set_rw_permission')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_clone')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_full_copy')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_resize_volume_file')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_set_volume_attributes')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_set_rw_permission')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_create_gpfs_clone')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_full_copy')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
def test_create_cloned_volume_with_metadata(self,
mock_local_path,
mock_gpfs_full_copy,
self.assertEqual(self.driver.create_cloned_volume(volume, src_volume),
{'size': 5.0})
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_delete_gpfs_file_ok(self, mock_exec):
mock_exec.side_effect = [('Parent Depth Parent inode File name\n'
'------ ----- -------------- ---------\n'
('', '')]
self.driver._delete_gpfs_file(self.images_dir)
- @patch('os.path.exists')
- @patch('cinder.utils.execute')
+ @mock.patch('os.path.exists')
+ @mock.patch('cinder.utils.execute')
def test_delete_gpfs_file_ok_parent(self, mock_exec, mock_path_exists):
mock_path_exists.side_effect = [True, False, False,
True, False, False,
('', '')]
self.driver._delete_gpfs_file(self.images_dir)
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._delete_gpfs_file')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_path_state')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._delete_gpfs_file')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_verify_gpfs_path_state')
def test_delete_volume(self,
mock_verify_gpfs_path_state,
mock_local_path,
mock_delete_gpfs_file):
self.driver.delete_volume('')
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_gpfs_redirect_ok(self, mock_exec):
org_value = self.driver.configuration.gpfs_max_clone_depth
self.flags(volume_driver=self.driver_name, gpfs_max_clone_depth=1)
self.flags(volume_driver=self.driver_name,
gpfs_max_clone_depth=org_value)
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_gpfs_redirect_fail_depth(self, mock_exec):
org_value = self.driver.configuration.gpfs_max_clone_depth
self.flags(volume_driver=self.driver_name, gpfs_max_clone_depth=0)
self.flags(volume_driver=self.driver_name,
gpfs_max_clone_depth=org_value)
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_gpfs_redirect_fail_match(self, mock_exec):
org_value = self.driver.configuration.gpfs_max_clone_depth
self.flags(volume_driver=self.driver_name, gpfs_max_clone_depth=1)
self.flags(volume_driver=self.driver_name,
gpfs_max_clone_depth=org_value)
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_snap')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_redirect')
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_snap')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_redirect')
+ @mock.patch('cinder.utils.execute')
def test_create_gpfs_clone(self,
mock_exec,
mock_redirect,
mock_redirect.side_effect = [True, False]
self.driver._create_gpfs_clone('', '')
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_create_gpfs_copy(self, mock_exec):
self.driver._create_gpfs_copy('', '')
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_create_gpfs_snap(self, mock_exec):
self.driver._create_gpfs_snap('')
self.driver._create_gpfs_snap('', '')
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_is_gpfs_parent_file_ok(self, mock_exec):
mock_exec.side_effect = [('Parent Depth Parent inode File name\n'
'------ ----- -------------- ---------\n'
self.assertEqual(True, self.driver._is_gpfs_parent_file(''))
self.assertEqual(False, self.driver._is_gpfs_parent_file(''))
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_redirect')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._set_rw_permission')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_snap')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._get_snapshot_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_redirect')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_set_rw_permission')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_snap')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_get_snapshot_path')
def test_create_snapshot(self,
mock_get_snapshot_path,
mock_local_path,
self.flags(volume_driver=self.driver_name,
gpfs_mount_point_base=org_value)
- @patch('cinder.utils.execute')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._get_snapshot_path')
+ @mock.patch('cinder.utils.execute')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_get_snapshot_path')
def test_delete_snapshot(self,
mock_snapshot_path,
mock_exec):
def test_remove_export(self):
self.assertEqual(None, self.driver.remove_export('', ''))
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
def test_initialize_connection(self, mock_local_path):
volume = self._fake_volume()
mock_local_path.return_value = "/tmp/fakepath"
self.assertEqual(stats['volume_backend_name'], 'GPFS')
self.assertEqual(stats['storage_protocol'], 'file')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._update_volume_stats')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_update_volume_stats')
def test_get_volume_stats_none_stats(self, mock_upd_vol_stats):
_stats_org = self.driver._stats
self.driver._stats = mock.Mock()
self.driver.get_volume_stats()
self.driver._stats = _stats_org
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._clone_image')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._clone_image')
def test_clone_image_pub(self, mock_exec):
self.driver.clone_image('', '', '', {'id': 1}, '')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path')
def test_is_cloneable_ok(self, mock_is_gpfs_path):
self.flags(volume_driver=self.driver_name,
gpfs_images_share_mode='test')
'12345')),
self.driver._is_cloneable('12345'))
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path')
def test_is_cloneable_fail_config(self, mock_is_gpfs_path):
self.flags(volume_driver=self.driver_name, gpfs_images_share_mode='')
CONF.gpfs_images_dir = ''
'12345')),
self.driver._is_cloneable('12345'))
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path')
def test_is_cloneable_fail_path(self, mock_is_gpfs_path):
self.flags(volume_driver=self.driver_name,
gpfs_images_share_mode='test')
'12345')),
self.driver._is_cloneable('12345'))
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._resize_volume_file')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._set_rw_permission')
- @patch('cinder.image.image_utils.convert_image')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy')
- @patch('cinder.image.image_utils.qemu_img_info')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_snap')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_parent_file')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_path_state')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_resize_volume_file')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_set_rw_permission')
+ @mock.patch('cinder.image.image_utils.convert_image')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy')
+ @mock.patch('cinder.image.image_utils.qemu_img_info')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_snap')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_is_gpfs_parent_file')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_verify_gpfs_path_state')
def test_clone_image_clonable(self,
mock_verify_gpfs_path_state,
mock_is_cloneable,
self.assertEqual(({'provider_location': None}, True),
self.driver._clone_image(volume, '', 1))
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_path_state')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_verify_gpfs_path_state')
def test_clone_image_not_cloneable(self,
mock_verify_gpfs_path_state,
mock_is_cloneable):
self.assertEqual((None, False),
self.driver._clone_image(volume, '', 1))
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._resize_volume_file')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._set_rw_permission')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy')
- @patch('cinder.image.image_utils.qemu_img_info')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_snap')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_parent_file')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_path_state')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_resize_volume_file')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_set_rw_permission')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_copy')
+ @mock.patch('cinder.image.image_utils.qemu_img_info')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_snap')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_is_gpfs_parent_file')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_verify_gpfs_path_state')
def test_clone_image_format_raw_copy_on_write(self,
mock_verify_gpfs_path_state,
mock_is_cloneable,
gpfs_images_share_mode='copy_on_write')
self.assertEqual(({'provider_location': None}, True),
self.driver._clone_image(volume, '', 1))
- self.driver._create_gpfs_snap.assert_called_once_with(self.images_dir)
+ mock_create_gpfs_snap.assert_called_once_with(self.images_dir)
self.flags(volume_driver=self.driver_name,
gpfs_images_share_mode=org_value)
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._resize_volume_file')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._set_rw_permission')
- @patch('shutil.copyfile')
- @patch('cinder.image.image_utils.qemu_img_info')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_parent_file')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_path_state')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_resize_volume_file')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_set_rw_permission')
+ @mock.patch('shutil.copyfile')
+ @mock.patch('cinder.image.image_utils.qemu_img_info')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_is_gpfs_parent_file')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_verify_gpfs_path_state')
def test_clone_image_format_raw_copy(self,
mock_verify_gpfs_path_state,
mock_is_cloneable,
gpfs_images_share_mode='copy')
self.assertEqual(({'provider_location': None}, True),
self.driver._clone_image(volume, '', 1))
- shutil.copyfile.assert_called_once_with(self.images_dir,
- self.volumes_path)
+ mock_copyfile.assert_called_once_with(self.images_dir,
+ self.volumes_path)
self.flags(volume_driver=self.driver_name,
gpfs_images_share_mode=org_value)
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._resize_volume_file')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._set_rw_permission')
- @patch('cinder.image.image_utils.convert_image')
- @patch('cinder.image.image_utils.qemu_img_info')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_path_state')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_resize_volume_file')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_set_rw_permission')
+ @mock.patch('cinder.image.image_utils.convert_image')
+ @mock.patch('cinder.image.image_utils.qemu_img_info')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_cloneable')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_verify_gpfs_path_state')
def test_clone_image_format_qcow2(self,
mock_verify_gpfs_path_state,
mock_is_cloneable,
volume = self._fake_volume()
self.assertEqual(({'provider_location': None}, True),
self.driver._clone_image(volume, '', 1))
- image_utils.convert_image.assert_called_once_with(self.images_dir,
- self.volumes_path,
- 'raw')
-
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._resize_volume_file')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
- @patch('cinder.image.image_utils.fetch_to_raw')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_path_state')
+ mock_conv_image.assert_called_once_with(self.images_dir,
+ self.volumes_path,
+ 'raw')
+
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_resize_volume_file')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
+ @mock.patch('cinder.image.image_utils.fetch_to_raw')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_verify_gpfs_path_state')
def test_copy_image_to_volume(self,
mock_verify_gpfs_path_state,
mock_fetch_to_raw,
volume = self._fake_volume()
self.driver.copy_image_to_volume('', volume, '', 1)
- @patch('cinder.image.image_utils.qemu_img_info')
- @patch('cinder.image.image_utils.resize_image')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
+ @mock.patch('cinder.image.image_utils.qemu_img_info')
+ @mock.patch('cinder.image.image_utils.resize_image')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
def test_resize_volume_file_ok(self,
mock_local_path,
mock_resize_image,
self.assertEqual(self._fake_qemu_qcow2_image_info('').virtual_size,
self.driver._resize_volume_file(volume, 2000))
- @patch('cinder.image.image_utils.qemu_img_info')
- @patch('cinder.image.image_utils.resize_image')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
+ @mock.patch('cinder.image.image_utils.qemu_img_info')
+ @mock.patch('cinder.image.image_utils.resize_image')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
def test_resize_volume_file_fail(self,
mock_local_path,
mock_resize_image,
self.assertRaises(exception.VolumeBackendAPIException,
self.driver._resize_volume_file, volume, 2000)
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._resize_volume_file')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_resize_volume_file')
def test_extend_volume(self, mock_resize_volume_file):
volume = self._fake_volume()
self.driver.extend_volume(volume, 2000)
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
- @patch('cinder.image.image_utils.upload_volume')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
+ @mock.patch('cinder.image.image_utils.upload_volume')
def test_copy_volume_to_image(self, mock_upload_volume, mock_local_path):
volume = self._fake_volume()
self.driver.copy_volume_to_image('', volume, '', '')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._delete_gpfs_file')
- @patch('cinder.openstack.common.fileutils.file_open')
- @patch('cinder.utils.temporary_chown')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_redirect')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._create_gpfs_clone')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._delete_gpfs_file')
+ @mock.patch('cinder.openstack.common.fileutils.file_open')
+ @mock.patch('cinder.utils.temporary_chown')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._gpfs_redirect')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_create_gpfs_clone')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
def test_backup_volume(self,
mock_local_path,
mock_create_gpfs_clone,
mock_local_path.return_value = self.volumes_path
self.driver.backup_volume('', backup, backup_service)
- @patch('cinder.openstack.common.fileutils.file_open')
- @patch('cinder.utils.temporary_chown')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
+ @mock.patch('cinder.openstack.common.fileutils.file_open')
+ @mock.patch('cinder.utils.temporary_chown')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
def test_restore_backup(self,
mock_local_path,
mock_temp_chown,
mock_local_path.return_value = self.volumes_path
self.driver.restore_backup('', backup, volume, backup_service)
- @patch('cinder.utils.execute')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._can_migrate_locally')
+ @mock.patch('cinder.utils.execute')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_can_migrate_locally')
def test_migrate_volume_ok(self, mock_local, mock_exec):
volume = self._fake_volume()
host = {}
self.assertEqual((True, None),
self.driver._migrate_volume(volume, host))
- @patch('cinder.utils.execute')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._can_migrate_locally')
+ @mock.patch('cinder.utils.execute')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_can_migrate_locally')
def test_migrate_volume_fail_dest_path(self, mock_local, mock_exec):
volume = self._fake_volume()
host = {}
self.assertEqual((False, None),
self.driver._migrate_volume(volume, host))
- @patch('cinder.utils.execute')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._can_migrate_locally')
+ @mock.patch('cinder.utils.execute')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_can_migrate_locally')
def test_migrate_volume_fail_mpb(self, mock_local, mock_exec):
volume = self._fake_volume()
host = {}
self.assertEqual((True, None),
self.driver._migrate_volume(volume, host))
- @patch('cinder.utils.execute')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._can_migrate_locally')
+ @mock.patch('cinder.utils.execute')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_can_migrate_locally')
def test_migrate_volume_fail_mv(self, mock_local, mock_exec):
volume = self._fake_volume()
host = {}
self.assertEqual((False, None),
self.driver._migrate_volume(volume, host))
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume')
def test_migrate_volume_ok_pub(self, mock_migrate_volume):
self.driver.migrate_volume('', '', '')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
- '_update_volume_storage_pool')
- @patch('cinder.volume.drivers.ibm.gpfs._different')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_update_volume_storage_pool')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs._different')
def test_retype_ok(self, mock_different, mock_strg_pool, mock_migrate_vol):
ctxt = self.context
(volume, new_type, diff, host) = self._fake_retype_arguments()
mock_migrate_vol.return_value = (True, True)
self.assertTrue(self.driver.retype(ctxt, volume, new_type, diff, host))
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
- '_update_volume_storage_pool')
- @patch('cinder.volume.drivers.ibm.gpfs._different')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_update_volume_storage_pool')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs._different')
def test_retype_diff_backend(self,
mock_different,
mock_strg_pool,
new_type,
diff, host))
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
- '_update_volume_storage_pool')
- @patch('cinder.volume.drivers.ibm.gpfs._different')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_update_volume_storage_pool')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs._different')
def test_retype_diff_pools_migrated(self,
mock_different,
mock_strg_pool,
mock_migrate_vol.return_value = (True, True)
self.assertTrue(self.driver.retype(ctxt, volume, new_type, diff, host))
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
- '_update_volume_storage_pool')
- @patch('cinder.volume.drivers.ibm.gpfs._different')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_update_volume_storage_pool')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs._different')
def test_retype_diff_pools(self,
mock_different,
mock_strg_pool,
diff,
host))
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
- '_update_volume_storage_pool')
- @patch('cinder.volume.drivers.ibm.gpfs._different')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._migrate_volume')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_update_volume_storage_pool')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs._different')
def test_retype_no_diff_hit(self,
mock_different,
mock_strg_pool,
diff,
host))
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_mkfs_ok(self, mock_exec):
volume = self._fake_volume()
self.driver._mkfs(volume, 'swap')
self.driver._mkfs(volume, 'ext3', 'test')
self.driver._mkfs(volume, 'vfat', 'test')
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_mkfs_fail_mk(self, mock_exec):
volume = self._fake_volume()
mock_exec.side_effect = (
self.assertRaises(exception.VolumeBackendAPIException,
self.driver._mkfs, volume, 'swap', 'test')
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_get_available_capacity_ok(self, mock_exec):
mock_exec.return_value = ('Filesystem 1-blocks Used '
'Available Capacity Mounted on\n'
self.assertEqual((10192683008, 10737418240),
self.driver._get_available_capacity('/gpfs0'))
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._verify_gpfs_path_state')
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.'
+ '_verify_gpfs_path_state')
+ @mock.patch('cinder.utils.execute')
def test_get_available_capacity_fail_mounted(self,
mock_exec,
mock_path_state):
'10192683008 6%% /gpfs0', '')
self.assertEqual((0, 0), self.driver._get_available_capacity('/gpfs0'))
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path')
def test_verify_gpfs_path_state_ok(self, mock_is_gpfs_path):
self.driver._verify_gpfs_path_state(self.images_dir)
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver._is_gpfs_path')
def test_verify_gpfs_path_state_fail_path(self, mock_is_gpfs_path):
mock_is_gpfs_path.side_effect = (
processutils.ProcessExecutionError(stdout='test', stderr='test'))
self.assertRaises(exception.VolumeBackendAPIException,
self.driver._verify_gpfs_path_state, self.images_dir)
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_create_consistencygroup(self, mock_exec):
ctxt = self.context
group = self._fake_group()
cmd = ['chmod', '770', cgpath]
mock_exec.assert_any_call(*cmd)
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_create_consistencygroup_fail(self, mock_exec):
ctxt = self.context
group = self._fake_group()
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.create_consistencygroup, ctxt, group)
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_delete_consistencygroup(self, mock_exec):
ctxt = self.context
group = self._fake_group()
cmd = ['mmdelfileset', fsdev, cgname, '-f']
mock_exec.assert_any_call(*cmd)
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_delete_consistencygroup_fail(self, mock_exec):
ctxt = self.context
group = self._fake_group()
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.delete_consistencygroup, ctxt, group)
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.create_snapshot')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.create_snapshot')
def test_create_cgsnapshot(self, mock_create_snap):
ctxt = self.context
cgsnap = self._fake_cgsnapshot()
self.driver.db.snapshot_get_all_for_cgsnapshot.\
assert_called_once_with(ctxt, cgsnap['id'])
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.create_snapshot')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.create_snapshot')
def test_create_cgsnapshot_empty(self, mock_create_snap):
ctxt = self.context
cgsnap = self._fake_cgsnapshot()
self.driver.db.snapshot_get_all_for_cgsnapshot.\
assert_called_once_with(ctxt, cgsnap['id'])
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.delete_snapshot')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.delete_snapshot')
def test_delete_cgsnapshot(self, mock_delete_snap):
ctxt = self.context
cgsnap = self._fake_cgsnapshot()
self.driver.db.snapshot_get_all_for_cgsnapshot.\
assert_called_once_with(ctxt, cgsnap['id'])
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.delete_snapshot')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.delete_snapshot')
def test_delete_cgsnapshot_empty(self, mock_delete_snap):
ctxt = self.context
cgsnap = self._fake_cgsnapshot()
ret = self.driver.local_path(volume)
self.assertEqual(ret, volume_path)
- @patch('cinder.context.get_admin_context')
- @patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
+ @mock.patch('cinder.context.get_admin_context')
+ @mock.patch('cinder.volume.drivers.ibm.gpfs.GPFSDriver.local_path')
def test_get_snapshot_path(self, mock_local_path, mock_admin_context):
volume = self._fake_volume()
self.driver.db = mock.Mock()
ret, os.path.join(os.path.dirname(volume_path), snapshot['name'])
)
- @patch('cinder.utils.execute')
+ @mock.patch('cinder.utils.execute')
def test_gpfs_full_copy(self, mock_exec):
src = "/tmp/vol1"
dest = "/tmp/vol2"
"""
import os
-from StringIO import StringIO
+import StringIO
import tempfile
import mock
def test_read_config(self, m_access, m_open):
# Test exception when file is not found
m_access.return_value = False
- m_open.return_value = StringIO(HNASCONF)
+ m_open.return_value = StringIO.StringIO(HNASCONF)
self.assertRaises(exception.NotFound, iscsi._read_config, '')
# Test exception when config file has parsing errors
# due to missing <svc> tag
m_access.return_value = True
- m_open.return_value = StringIO(HNAS_WRONG_CONF1)
+ m_open.return_value = StringIO.StringIO(HNAS_WRONG_CONF1)
self.assertRaises(exception.ConfigNotFound, iscsi._read_config, '')
# Test exception when config file has parsing errors
# due to missing <hdp> tag
- m_open.return_value = StringIO(HNAS_WRONG_CONF2)
+ m_open.return_value = StringIO.StringIO(HNAS_WRONG_CONF2)
self.configuration.hds_hnas_iscsi_config_file = ''
self.assertRaises(exception.ParameterNotFound, iscsi._read_config, '')
#
import os
-from StringIO import StringIO
+import StringIO
import tempfile
import mock
def test_read_config(self, m_access, m_open):
# Test exception when file is not found
m_access.return_value = False
- m_open.return_value = StringIO(HNASCONF)
+ m_open.return_value = StringIO.StringIO(HNASCONF)
self.assertRaises(exception.NotFound, nfs._read_config, '')
# Test exception when config file has parsing errors
# due to missing <svc> tag
m_access.return_value = True
- m_open.return_value = StringIO(HNAS_WRONG_CONF1)
+ m_open.return_value = StringIO.StringIO(HNAS_WRONG_CONF1)
self.assertRaises(exception.ConfigNotFound, nfs._read_config, '')
# Test exception when config file has parsing errors
# due to missing <hdp> tag
- m_open.return_value = StringIO(HNAS_WRONG_CONF2)
+ m_open.return_value = StringIO.StringIO(HNAS_WRONG_CONF2)
self.configuration.hds_hnas_iscsi_config_file = ''
self.assertRaises(exception.ParameterNotFound, nfs._read_config, '')
import shutil
import tempfile
import time
-from xml.dom.minidom import Document
+from xml.dom import minidom
import mock
file is used to set the Huawei storage custom parameters, therefore,
in the UT test we need to simulate such a configuration file
"""
- doc = Document()
+ doc = minidom.Document()
config = doc.createElement('config')
doc.appendChild(config)
file is used to set the Huawei storage custom parameters, therefore,
in the UT test we need to simulate such a configuration file
"""
- doc = Document()
+ doc = minidom.Document()
config = doc.createElement('config')
doc.appendChild(config)
import socket
import tempfile
import time
-from xml.dom.minidom import Document
+from xml.dom import minidom
from xml.etree import ElementTree as ET
import mox
from cinder import ssh_utils
from cinder import test
from cinder.volume import configuration as conf
+from cinder.volume.drivers import huawei
from cinder.volume.drivers.huawei import huawei_utils
-from cinder.volume.drivers.huawei import HuaweiVolumeDriver
from cinder.volume.drivers.huawei import ssh_common
from cinder.volume import volume_types
def create_fake_conf_file(filename):
- doc = Document()
+ doc = minidom.Document()
config = doc.createElement('config')
doc.appendChild(config)
def _init_driver(self):
Curr_test[0] = 'T'
- self.driver = HuaweiVolumeDriver(configuration=self.configuration)
+ self.driver = huawei.HuaweiVolumeDriver(
+ configuration=self.configuration)
self.driver.do_setup(None)
def test_conf_invalid(self):
tmp_configuration.cinder_huawei_conf_file = tmp_fonf_file
tmp_configuration.append_config_values(mox.IgnoreArg())
self.assertRaises(IOError,
- HuaweiVolumeDriver,
+ huawei.HuaweiVolumeDriver,
configuration=tmp_configuration)
# Test Product and Protocol invalid
tmp_dict = {'Storage/Product': 'T', 'Storage/Protocol': 'iSCSI'}
for k, v in tmp_dict.items():
modify_conf(self.fake_conf_file, k, 'xx')
self.assertRaises(exception.InvalidInput,
- HuaweiVolumeDriver,
+ huawei.HuaweiVolumeDriver,
configuration=self.configuration)
modify_conf(self.fake_conf_file, k, v)
# Test ctr ip, UserName and password unspecified
'Storage/UserPassword': '123456'}
for k, v in tmp_dict.items():
modify_conf(self.fake_conf_file, k, '')
- tmp_driver = HuaweiVolumeDriver(configuration=self.configuration)
+ tmp_driver = huawei.HuaweiVolumeDriver(
+ configuration=self.configuration)
self.assertRaises(exception.InvalidInput,
tmp_driver.do_setup, None)
modify_conf(self.fake_conf_file, k, v)
# Test StoragePool unspecified
modify_conf(self.fake_conf_file, 'LUN/StoragePool', '', attrib='Name')
- tmp_driver = HuaweiVolumeDriver(configuration=self.configuration)
+ tmp_driver = huawei.HuaweiVolumeDriver(
+ configuration=self.configuration)
self.assertRaises(exception.InvalidInput,
tmp_driver.do_setup, None)
modify_conf(self.fake_conf_file, 'LUN/StoragePool', 'RAID_001',
attrib='Name')
# Test LUN type invalid
modify_conf(self.fake_conf_file, 'LUN/LUNType', 'thick')
- tmp_driver = HuaweiVolumeDriver(configuration=self.configuration)
+ tmp_driver = huawei.HuaweiVolumeDriver(
+ configuration=self.configuration)
tmp_driver.do_setup(None)
self.assertRaises(exception.InvalidInput,
tmp_driver.create_volume, FAKE_VOLUME)
# Test OSType invalid
modify_conf(self.fake_conf_file, 'Host', 'invalid_type',
attrib='OSType')
- tmp_driver = HuaweiVolumeDriver(configuration=self.configuration)
+ tmp_driver = huawei.HuaweiVolumeDriver(
+ configuration=self.configuration)
self.assertRaises(exception.InvalidInput,
tmp_driver.do_setup, None)
modify_conf(self.fake_conf_file, 'Host', 'Linux', attrib='OSType')
# Test TargetIP not found
modify_conf(self.fake_conf_file, 'iSCSI/DefaultTargetIP', '')
modify_conf(self.fake_conf_file, 'iSCSI/Initiator', '', attrib='Name')
- tmp_driver = HuaweiVolumeDriver(configuration=self.configuration)
+ tmp_driver = huawei.HuaweiVolumeDriver(
+ configuration=self.configuration)
tmp_driver.do_setup(None)
tmp_driver.create_volume(FAKE_VOLUME)
self.assertRaises(exception.InvalidInput,
def _init_driver(self):
Curr_test[0] = 'T'
- self.driver = HuaweiVolumeDriver(configuration=self.configuration)
+ self.driver = huawei.HuaweiVolumeDriver(
+ configuration=self.configuration)
self.driver.do_setup(None)
def test_validate_connector_failed(self):
def _init_driver(self):
Curr_test[0] = 'Dorado5100'
modify_conf(self.fake_conf_file, 'Storage/Product', 'Dorado')
- self.driver = HuaweiVolumeDriver(configuration=self.configuration)
+ self.driver = huawei.HuaweiVolumeDriver(
+ configuration=self.configuration)
self.driver.do_setup(None)
def test_create_cloned_volume(self):
def _init_driver(self):
Curr_test[0] = 'Dorado2100G2'
modify_conf(self.fake_conf_file, 'Storage/Product', 'Dorado')
- self.driver = HuaweiVolumeDriver(configuration=self.configuration)
+ self.driver = huawei.HuaweiVolumeDriver(
+ configuration=self.configuration)
self.driver.do_setup(None)
def test_create_cloned_volume(self):
def _init_driver(self):
Curr_test[0] = 'Dorado5100'
modify_conf(self.fake_conf_file, 'Storage/Product', 'Dorado')
- self.driver = HuaweiVolumeDriver(configuration=self.configuration)
+ self.driver = huawei.HuaweiVolumeDriver(
+ configuration=self.configuration)
self.driver.do_setup(None)
def test_create_delete_cloned_volume(self):
def _init_driver(self):
Curr_test[0] = 'Dorado2100G2'
modify_conf(self.fake_conf_file, 'Storage/Product', 'Dorado')
- self.driver = HuaweiVolumeDriver(configuration=self.configuration)
+ self.driver = huawei.HuaweiVolumeDriver(
+ configuration=self.configuration)
self.driver.do_setup(None)
def test_conf_invalid(self):
self.stubs.Set(ssh_common.TseriesCommon, '_change_file_mode',
Fake_change_file_mode)
Curr_test[0] = 'T'
- self.driver = HuaweiVolumeDriver(configuration=self.configuration)
+ self.driver = huawei.HuaweiVolumeDriver(
+ configuration=self.configuration)
self.driver.do_setup(None)
def test_reach_max_connection_limit(self):
from cinder.volume.drivers.netapp import common
from cinder.volume.drivers.netapp.dataontap.client import client_base
from cinder.volume.drivers.netapp.dataontap import ssc_cmode
-from cinder.volume.drivers.netapp.options import netapp_7mode_opts
-from cinder.volume.drivers.netapp.options import netapp_basicauth_opts
-from cinder.volume.drivers.netapp.options import netapp_cluster_opts
-from cinder.volume.drivers.netapp.options import netapp_connection_opts
-from cinder.volume.drivers.netapp.options import netapp_provisioning_opts
-from cinder.volume.drivers.netapp.options import netapp_transport_opts
+from cinder.volume.drivers.netapp import options
from cinder.volume.drivers.netapp import utils
def create_configuration():
configuration = conf.Configuration(None)
- configuration.append_config_values(netapp_connection_opts)
- configuration.append_config_values(netapp_transport_opts)
- configuration.append_config_values(netapp_basicauth_opts)
- configuration.append_config_values(netapp_cluster_opts)
- configuration.append_config_values(netapp_7mode_opts)
- configuration.append_config_values(netapp_provisioning_opts)
+ configuration.append_config_values(options.netapp_connection_opts)
+ configuration.append_config_values(options.netapp_transport_opts)
+ configuration.append_config_values(options.netapp_basicauth_opts)
+ configuration.append_config_values(options.netapp_cluster_opts)
+ configuration.append_config_values(options.netapp_7mode_opts)
+ configuration.append_config_values(options.netapp_provisioning_opts)
return configuration
from cinder.volume.drivers.netapp import common
from cinder.volume.drivers.netapp.eseries import client
from cinder.volume.drivers.netapp.eseries import iscsi
-from cinder.volume.drivers.netapp.eseries.iscsi import LOG as driver_log
from cinder.volume.drivers.netapp.eseries import utils
-from cinder.volume.drivers.netapp.options import netapp_basicauth_opts
-from cinder.volume.drivers.netapp.options import netapp_eseries_opts
+from cinder.volume.drivers.netapp import options
import cinder.volume.drivers.netapp.utils as na_utils
def create_configuration():
configuration = conf.Configuration(None)
- configuration.append_config_values(netapp_basicauth_opts)
- configuration.append_config_values(netapp_eseries_opts)
+ configuration.append_config_values(options.netapp_basicauth_opts)
+ configuration.append_config_values(options.netapp_eseries_opts)
return configuration
self.driver._create_volume, wrong_eseries_pool_label,
self.fake_eseries_volume_label, self.fake_size_gb)
- @mock.patch.object(driver_log, 'info')
+ @mock.patch.object(iscsi.LOG, 'info')
@mock.patch.object(client.RestClient, 'list_storage_pools')
@mock.patch.object(client.RestClient, 'create_volume',
mock.MagicMock(return_value='CorrectVolume'))
@mock.patch.object(client.RestClient, 'create_volume',
mock.MagicMock(
side_effect=exception.NetAppDriverException))
- @mock.patch.object(driver_log, 'info', mock.Mock())
+ @mock.patch.object(iscsi.LOG, 'info', mock.Mock())
def test_create_volume_check_exception(self, fake_list_pools):
fake_pool = {}
fake_pool['label'] = self.fake_eseries_pool_label
# under the License.
"""Unit tests for the NetApp-specific NFS driver module."""
-from itertools import chain
+import itertools
import os
from lxml import etree
import mock
-import mox
-from mox import IgnoreArg
+import mox as mox_lib
import six
from cinder import exception
'port': 443,
'username': 'admin',
'password': 'passw0rd'}
-SEVEN_MODE_CONNECTION_INFO = dict(chain(CONNECTION_INFO.items(),
- {'vfiler': 'test_vfiler'}.items()))
+SEVEN_MODE_CONNECTION_INFO = dict(
+ itertools.chain(CONNECTION_INFO.items(),
+ {'vfiler': 'test_vfiler'}.items()))
FAKE_VSERVER = 'fake_vserver'
def create_configuration():
- configuration = mox.MockObject(conf.Configuration)
- configuration.append_config_values(mox.IgnoreArg())
+ configuration = mox_lib.MockObject(conf.Configuration)
+ configuration.append_config_values(mox_lib.IgnoreArg())
configuration.nfs_mount_point_base = '/mnt/test'
configuration.nfs_mount_options = None
configuration.nas_mount_options = None
drv = self._driver
mox.StubOutWithMock(drv, '_clone_volume')
- drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
+ drv._clone_volume(mox_lib.IgnoreArg(),
+ mox_lib.IgnoreArg(),
+ mox_lib.IgnoreArg())
mox.ReplayAll()
drv.create_snapshot(FakeSnapshot())
mox.StubOutWithMock(drv, 'local_path')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions')
- drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
- drv._get_volume_location(IgnoreArg()).AndReturn(location)
- drv.local_path(IgnoreArg()).AndReturn('/mnt')
- drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
- drv._set_rw_permissions(IgnoreArg())
+ drv._clone_volume(mox_lib.IgnoreArg(),
+ mox_lib.IgnoreArg(),
+ mox_lib.IgnoreArg())
+ drv._get_volume_location(mox_lib.IgnoreArg()).AndReturn(location)
+ drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt')
+ drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True)
+ drv._set_rw_permissions(mox_lib.IgnoreArg())
mox.ReplayAll()
if snapshot_exists:
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_get_volume_path')
- drv._get_provider_location(IgnoreArg())
- drv._get_provider_location(IgnoreArg())
- drv._volume_not_present(IgnoreArg(), IgnoreArg())\
+ drv._get_provider_location(mox_lib.IgnoreArg())
+ drv._get_provider_location(mox_lib.IgnoreArg())
+ drv._volume_not_present(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())\
.AndReturn(not snapshot_exists)
if snapshot_exists:
- drv._get_volume_path(IgnoreArg(), IgnoreArg())
+ drv._get_volume_path(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
drv._execute('rm', None, run_as_root=True)
- drv._post_prov_deprov_in_ssc(IgnoreArg())
+ drv._post_prov_deprov_in_ssc(mox_lib.IgnoreArg())
mox.ReplayAll()
'nfsvol')
drv.zapi_client.clone_file('nfsvol', 'volume_name', 'clone_name',
'openstack')
- drv._get_host_ip(IgnoreArg()).AndReturn('127.0.0.1')
- drv._get_export_path(IgnoreArg()).AndReturn('/nfs')
- drv._post_prov_deprov_in_ssc(IgnoreArg())
+ drv._get_host_ip(mox_lib.IgnoreArg()).AndReturn('127.0.0.1')
+ drv._get_export_path(mox_lib.IgnoreArg()).AndReturn('/nfs')
+ drv._post_prov_deprov_in_ssc(mox_lib.IgnoreArg())
return mox
def _prepare_info_by_ip_response(self):
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_execute')
- drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
+ drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt')
drv._execute(*cmd, run_as_root=True).AndReturn((None, ''))
mox.ReplayAll()
res = drv._find_old_cache_files('share')
drv._get_mount_point_for_share('share').AndReturn('/mnt')
drv._execute(*cmd, run_as_root=True).AndReturn((files, None))
drv._shortlist_del_eligible_files(
- IgnoreArg(), r_files).AndReturn(r_files)
+ mox_lib.IgnoreArg(), r_files).AndReturn(r_files)
mox.ReplayAll()
res = drv._find_old_cache_files('share')
mox.VerifyAll()
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_delete_file')
- drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
+ drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt')
drv._delete_file('/mnt/img-cache-2').AndReturn(True)
drv._delete_file('/mnt/img-cache-1').AndReturn(True)
mox.ReplayAll()
mox.StubOutWithMock(drv, '_post_clone_image')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
- drv._find_image_in_cache(IgnoreArg()).AndReturn(
+ drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn(
[('share', 'file_name')])
- drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
+ drv._is_share_vol_compatible(mox_lib.IgnoreArg(),
+ mox_lib.IgnoreArg()).AndReturn(True)
drv._do_clone_rel_img_cache('file_name', 'vol', 'share', 'file_name')
drv._post_clone_image(volume)
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
- drv._find_image_in_cache(IgnoreArg()).AndReturn([])
- drv._is_cloneable_share(IgnoreArg()).AndReturn('127.0.0.1:/share')
- drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(False)
+ drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
+ drv._is_cloneable_share(
+ mox_lib.IgnoreArg()).AndReturn('127.0.0.1:/share')
+ drv._is_share_vol_compatible(mox_lib.IgnoreArg(),
+ mox_lib.IgnoreArg()).AndReturn(False)
mox.ReplayAll()
(prop, cloned) = drv.clone_image(
mox.StubOutWithMock(drv, '_resize_image_file')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
- drv._find_image_in_cache(IgnoreArg()).AndReturn([])
- drv._is_cloneable_share(IgnoreArg()).AndReturn('127.0.0.1:/share')
- drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
- drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
+ drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
+ drv._is_cloneable_share(
+ mox_lib.IgnoreArg()).AndReturn('127.0.0.1:/share')
+ drv._is_share_vol_compatible(mox_lib.IgnoreArg(),
+ mox_lib.IgnoreArg()).AndReturn(True)
+ drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id', run_as_root=True).\
AndReturn(self.get_img_info('raw'))
drv._clone_volume(
'img-id', 'vol', share='127.0.0.1:/share', volume_id=None)
- drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
- drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
+ drv._get_mount_point_for_share(mox_lib.IgnoreArg()).AndReturn('/mnt')
+ drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True)
drv._set_rw_permissions('/mnt/vol')
- drv._resize_image_file({'name': 'vol'}, IgnoreArg())
+ drv._resize_image_file({'name': 'vol'}, mox_lib.IgnoreArg())
mox.ReplayAll()
drv.clone_image(
mox.StubOutWithMock(drv, '_register_image_in_cache')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
- drv._find_image_in_cache(IgnoreArg()).AndReturn([])
+ drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
- drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
+ drv._is_share_vol_compatible(mox_lib.IgnoreArg(),
+ mox_lib.IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id', run_as_root=True).\
AndReturn(self.get_img_info('notraw'))
- image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw',
- run_as_root=True)
+ image_utils.convert_image(mox_lib.IgnoreArg(),
+ mox_lib.IgnoreArg(),
+ 'raw', run_as_root=True)
image_utils.qemu_img_info('/mnt/vol', run_as_root=True).\
AndReturn(self.get_img_info('raw'))
- drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
+ drv._register_image_in_cache(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
- drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
+ drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True)
drv._set_rw_permissions('/mnt/vol')
- drv._resize_image_file({'name': 'vol'}, IgnoreArg())
+ drv._resize_image_file({'name': 'vol'}, mox_lib.IgnoreArg())
mox.ReplayAll()
drv.clone_image(
mox.StubOutWithMock(os.path, 'exists')
mox.StubOutWithMock(drv, '_delete_file')
- drv._find_image_in_cache(IgnoreArg()).AndReturn([])
+ drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
- drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
+ drv._is_share_vol_compatible(mox_lib.IgnoreArg(),
+ mox_lib.IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id', run_as_root=True).\
AndReturn(self.get_img_info('notraw'))
- image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw',
- run_as_root=True)
+ image_utils.convert_image(mox_lib.IgnoreArg(),
+ mox_lib.IgnoreArg(),
+ 'raw', run_as_root=True)
image_utils.qemu_img_info('/mnt/vol', run_as_root=True).\
AndReturn(self.get_img_info('raw'))
- drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
- drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
- drv._discover_file_till_timeout(IgnoreArg()).AndReturn(False)
- drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
+ drv._register_image_in_cache(mox_lib.IgnoreArg(),
+ mox_lib.IgnoreArg())
+ drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt/vol')
+ drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(False)
+ drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt/vol')
os.path.exists('/mnt/vol').AndReturn(True)
drv._delete_file('/mnt/vol')
mox.StubOutWithMock(os.path, 'exists')
mox.StubOutWithMock(drv, '_delete_file')
- drv._find_image_in_cache(IgnoreArg()).AndReturn([])
+ drv._find_image_in_cache(mox_lib.IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
- drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
+ drv._is_share_vol_compatible(mox_lib.IgnoreArg(),
+ mox_lib.IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id', run_as_root=True).\
AndReturn(self.get_img_info('notraw'))
- image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw',
+ image_utils.convert_image(mox_lib.IgnoreArg(),
+ mox_lib.IgnoreArg(), 'raw',
run_as_root=True)
image_utils.qemu_img_info('/mnt/vol', run_as_root=True).\
AndReturn(self.get_img_info('raw'))
- drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
- drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
- drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
+ drv._register_image_in_cache(mox_lib.IgnoreArg(),
+ mox_lib.IgnoreArg())
+ drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt/vol')
+ drv._discover_file_till_timeout(mox_lib.IgnoreArg()).AndReturn(True)
drv._set_rw_permissions('/mnt/vol')
drv._resize_image_file(
- IgnoreArg(), IgnoreArg()).AndRaise(exception.InvalidResults())
- drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
+ mox_lib.IgnoreArg(),
+ mox_lib.IgnoreArg()).AndRaise(exception.InvalidResults())
+ drv.local_path(mox_lib.IgnoreArg()).AndReturn('/mnt/vol')
os.path.exists('/mnt/vol').AndReturn(True)
drv._delete_file('/mnt/vol')
mox = self.mox
strg = 'nfs://10.61.222.333/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
- drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
+ drv._check_share_in_use(mox_lib.IgnoreArg(),
+ mox_lib.IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
mox = self.mox
strg = 'nfs://10.61.222.333:8080/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
- drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
+ drv._check_share_in_use(mox_lib.IgnoreArg(),
+ mox_lib.IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
mox = self.mox
strg = 'nfs://com.netapp:8080/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
- drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
+ drv._check_share_in_use(mox_lib.IgnoreArg(),
+ mox_lib.IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
mox = self.mox
strg = 'nfs://netapp.com/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
- drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
+ drv._check_share_in_use(mox_lib.IgnoreArg(),
+ mox_lib.IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
mox = self.mox
strg = 'nfs://netapp.com/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
- drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
+ drv._check_share_in_use(mox_lib.IgnoreArg(),
+ mox_lib.IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
drv = self._driver
mox = self.mox
mox.StubOutWithMock(utils, 'resolve_hostname')
- utils.resolve_hostname(IgnoreArg()).AndRaise(Exception())
+ utils.resolve_hostname(mox_lib.IgnoreArg()).AndRaise(Exception())
mox.ReplayAll()
share = drv._check_share_in_use('incorrect:8989', '/dir')
mox.VerifyAll()
drv._mounted_shares = ['127.0.0.1:/dir/share']
mox.StubOutWithMock(utils, 'resolve_hostname')
mox.StubOutWithMock(drv, '_share_match_for_ip')
- utils.resolve_hostname(IgnoreArg()).AndReturn('10.22.33.44')
+ utils.resolve_hostname(mox_lib.IgnoreArg()).AndReturn('10.22.33.44')
drv._share_match_for_ip(
'10.22.33.44', ['127.0.0.1:/dir/share']).AndReturn('share')
mox.ReplayAll()
self._driver.configuration.netapp_copyoffload_tool_path = 'cof_path'
self._driver.zapi_client = mock.Mock()
- @mock.patch.object(netapp_nfs_cmode, 'get_volume_extra_specs')
+ @mock.patch.object(utils, 'get_volume_extra_specs')
@mock.patch.object(utils, 'LOG', mock.Mock())
def test_create_volume(self, mock_volume_extra_specs):
drv = self._driver
drv.ssc_enabled = False
extra_specs = {'netapp:raid_type': 'raid4'}
mock_volume_extra_specs = mock.Mock()
- self.mock_object(netapp_nfs_cmode,
+ self.mock_object(utils,
'get_volume_extra_specs',
mock_volume_extra_specs)
mock_volume_extra_specs.return_value = extra_specs
fake_share = 'localhost:myshare'
host = 'hostname@backend#' + fake_share
mock_volume_extra_specs = mock.Mock()
- self.mock_object(netapp_nfs_cmode,
+ self.mock_object(utils,
'get_volume_extra_specs',
mock_volume_extra_specs)
mock_volume_extra_specs.return_value = extra_specs
self.assertRaises(exception.InvalidHost,
self._driver.create_volume, FakeVolume(host, 1))
- @mock.patch.object(netapp_nfs_cmode, 'get_volume_extra_specs')
+ @mock.patch.object(utils, 'get_volume_extra_specs')
def test_create_volume_with_qos_policy(self, mock_volume_extra_specs):
drv = self._driver
drv.ssc_enabled = False
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_get_volume_path')
- drv._get_provider_location(IgnoreArg())
- drv._volume_not_present(IgnoreArg(), IgnoreArg())\
+ drv._get_provider_location(mox_lib.IgnoreArg())
+ drv._volume_not_present(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())\
.AndReturn(not snapshot_exists)
if snapshot_exists:
- drv._get_volume_path(IgnoreArg(), IgnoreArg())
+ drv._get_volume_path(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
drv._execute('rm', None, run_as_root=True)
mox.ReplayAll()
mox.StubOutWithMock(drv, '_get_export_ip_path')
drv._get_export_ip_path(
- IgnoreArg(), IgnoreArg()).AndReturn(('127.0.0.1', '/nfs'))
+ mox_lib.IgnoreArg(),
+ mox_lib.IgnoreArg()).AndReturn(('127.0.0.1', '/nfs'))
return mox
def test_clone_volume_clear(self):
drv.zapi_client = mox.CreateMockAnything()
drv.zapi_client.get_actual_path_for_export('/nfs').AndReturn(
'/vol/vol1/nfs')
- drv.zapi_client.clone_file(IgnoreArg(), IgnoreArg())
+ drv.zapi_client.clone_file(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
mox.ReplayAll()
drv.zapi_client = mox.CreateMockAnything()
drv.zapi_client.get_actual_path_for_export('/nfs').AndReturn(
'/vol/vol1/nfs')
- drv.zapi_client.clone_file(IgnoreArg(), IgnoreArg())
+ drv.zapi_client.clone_file(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
mox.ReplayAll()
import httplib
from lxml import etree
-from mox import IgnoreArg
+import mox
import six
from cinder import exception
mirrored)
raiddp = {'ha_policy': 'cfo', 'raid_type': 'raiddp'}
ssc_cmode.query_aggr_options(
- na_server, IgnoreArg()).AndReturn(raiddp)
+ na_server, mox.IgnoreArg()).AndReturn(raiddp)
ssc_cmode.query_aggr_storage_disk(
- na_server, IgnoreArg()).AndReturn('SSD')
+ na_server, mox.IgnoreArg()).AndReturn('SSD')
raid4 = {'ha_policy': 'cfo', 'raid_type': 'raid4'}
ssc_cmode.query_aggr_options(
- na_server, IgnoreArg()).AndReturn(raid4)
+ na_server, mox.IgnoreArg()).AndReturn(raid4)
ssc_cmode.query_aggr_storage_disk(
- na_server, IgnoreArg()).AndReturn('SAS')
+ na_server, mox.IgnoreArg()).AndReturn('SAS')
self.mox.ReplayAll()
res_vols = ssc_cmode.get_cluster_vols_with_ssc(
import mock
import mox as mox_lib
-from mox import IgnoreArg
-from mox import IsA
from mox import stubout
from oslo_utils import units
self.configuration.nfs_shares_config = self.TEST_SHARES_CONFIG_FILE
self.assertRaises(exception.NfsException,
- drv.do_setup, IsA(context.RequestContext))
+ drv.do_setup, mox_lib.IsA(context.RequestContext))
def test_setup_should_throw_error_if_oversub_ratio_less_than_zero(self):
"""do_setup should throw error if nfs_oversub_ratio is less than 0."""
self.configuration.nfs_oversub_ratio = -1
self.assertRaises(exception.NfsException,
drv.do_setup,
- IsA(context.RequestContext))
+ mox_lib.IsA(context.RequestContext))
def test_setup_should_throw_error_if_used_ratio_less_than_zero(self):
"""do_setup should throw error if nfs_used_ratio is less than 0."""
self.configuration.nfs_used_ratio = -1
self.assertRaises(exception.NfsException,
drv.do_setup,
- IsA(context.RequestContext))
+ mox_lib.IsA(context.RequestContext))
def test_setup_should_throw_error_if_used_ratio_greater_than_one(self):
"""do_setup should throw error if nfs_used_ratio is greater than 1."""
self.configuration.nfs_used_ratio = 2
self.assertRaises(exception.NfsException,
drv.do_setup,
- IsA(context.RequestContext))
+ mox_lib.IsA(context.RequestContext))
def test_setup_should_throw_exception_if_nfs_client_is_not_installed(self):
"""do_setup should throw error if nfs client is not installed."""
mox.ReplayAll()
self.assertRaises(exception.NfsException,
- drv.do_setup, IsA(context.RequestContext))
+ drv.do_setup, mox_lib.IsA(context.RequestContext))
mox.VerifyAll()
mox.StubOutWithMock(drv, '_create_sparsed_file')
mox.StubOutWithMock(drv, '_set_rw_permissions')
- drv._create_sparsed_file(IgnoreArg(), IgnoreArg())
- drv._set_rw_permissions(IgnoreArg())
+ drv._create_sparsed_file(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
+ drv._set_rw_permissions(mox_lib.IgnoreArg())
mox.ReplayAll()
mox.StubOutWithMock(drv, '_create_regular_file')
mox.StubOutWithMock(drv, '_set_rw_permissions')
- drv._create_regular_file(IgnoreArg(), IgnoreArg())
- drv._set_rw_permissions(IgnoreArg())
+ drv._create_regular_file(mox_lib.IgnoreArg(), mox_lib.IgnoreArg())
+ drv._set_rw_permissions(mox_lib.IgnoreArg())
mox.ReplayAll()
from cinder.openstack.common import log as logging
from cinder import test
from cinder.tests.image import fake as fake_image
-from cinder.tests.test_volume import DriverTestCase
+from cinder.tests import test_volume
from cinder.volume import configuration as conf
import cinder.volume.drivers.rbd as driver
from cinder.volume.flows.manager import create_volume
self.mock_rbd_wrapper.close()
-class ManagedRBDTestCase(DriverTestCase):
+class ManagedRBDTestCase(test_volume.DriverTestCase):
driver_name = "cinder.volume.drivers.rbd.RBDDriver"
def setUp(self):
from cinder.image import image_utils
from cinder import test
-from cinder.volume.drivers.sheepdog import SheepdogDriver
+from cinder.volume.drivers import sheepdog
COLLIE_NODE_INFO = """
class SheepdogTestCase(test.TestCase):
def setUp(self):
super(SheepdogTestCase, self).setUp()
- self.driver = SheepdogDriver()
+ self.driver = sheepdog.SheepdogDriver()
def test_update_volume_stats(self):
def fake_stats(*args):
lambda w, x, y, z: None)
self.stubs.Set(image_utils, 'convert_image',
lambda x, y, z: None)
- self.stubs.Set(SheepdogDriver, '_try_execute', fake_try_execute)
+ self.stubs.Set(sheepdog.SheepdogDriver,
+ '_try_execute',
+ fake_try_execute)
self.driver.copy_image_to_volume(None, {'name': 'test',
'size': 1},
FakeImageService(), None)
'id': ss_uuid,
'size': fake_size}
- with mock.patch.object(SheepdogDriver, '_try_execute') as mock_exe:
+ with mock.patch.object(sheepdog.SheepdogDriver,
+ '_try_execute') as mock_exe:
self.driver.create_volume_from_snapshot(fake_vol, fake_snapshot)
args = ['qemu-img', 'create', '-b',
"sheepdog:%s:%s" % (fake_snapshot['volume_name'],
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
-from cinder.volume.drivers.solidfire import SolidFireDriver
+from cinder.volume.drivers import solidfire
from cinder.volume import qos_specs
from cinder.volume import volume_types
self.configuration.sf_allow_template_caching = False
super(SolidFireVolumeTestCase, self).setUp()
- self.stubs.Set(SolidFireDriver, '_issue_api_request',
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_issue_api_request',
self.fake_issue_api_request)
- self.stubs.Set(SolidFireDriver, '_build_endpoint_info',
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_build_endpoint_info',
self.fake_build_endpoint_info)
self.expected_qos_results = {'minIOPS': 1000,
return {'fake': 'fake-model'}
def test_create_with_qos_type(self):
- self.stubs.Set(SolidFireDriver, '_issue_api_request',
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_issue_api_request',
self.fake_issue_api_request)
- self.stubs.Set(SolidFireDriver, '_set_qos_by_volume_type',
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_set_qos_by_volume_type',
self.fake_set_qos_by_volume_type)
testvol = {'project_id': 'testprjid',
'name': 'testvol',
'volume_type_id': 'fast',
'created_at': timeutils.utcnow()}
- sfv = SolidFireDriver(configuration=self.configuration)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
model_update = sfv.create_volume(testvol)
self.assertIsNotNone(model_update)
def test_create_volume(self):
- self.stubs.Set(SolidFireDriver, '_issue_api_request',
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_issue_api_request',
self.fake_issue_api_request)
testvol = {'project_id': 'testprjid',
'name': 'testvol',
'volume_type_id': None,
'created_at': timeutils.utcnow()}
- sfv = SolidFireDriver(configuration=self.configuration)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
model_update = sfv.create_volume(testvol)
self.assertIsNotNone(model_update)
self.assertIsNone(model_update.get('provider_geometry', None))
def test_create_volume_non_512(self):
- self.stubs.Set(SolidFireDriver, '_issue_api_request',
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_issue_api_request',
self.fake_issue_api_request)
testvol = {'project_id': 'testprjid',
'name': 'testvol',
'created_at': timeutils.utcnow()}
self.configuration.sf_emulate_512 = False
- sfv = SolidFireDriver(configuration=self.configuration)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
model_update = sfv.create_volume(testvol)
self.assertEqual(model_update.get('provider_geometry', None),
'4096 4096')
self.configuration.sf_emulate_512 = True
def test_create_snapshot(self):
- self.stubs.Set(SolidFireDriver, '_issue_api_request',
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_issue_api_request',
self.fake_issue_api_request)
- self.stubs.Set(SolidFireDriver, '_get_model_info',
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_get_model_info',
self.fake_get_model_info)
testvol = {'project_id': 'testprjid',
'name': 'testvol',
'volume_type_id': None,
'created_at': timeutils.utcnow()}
- sfv = SolidFireDriver(configuration=self.configuration)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
sfv.create_volume(testvol)
sfv.create_snapshot(testsnap)
def test_create_clone(self):
- self.stubs.Set(SolidFireDriver, '_issue_api_request',
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_issue_api_request',
self.fake_issue_api_request)
- self.stubs.Set(SolidFireDriver, '_get_model_info',
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_get_model_info',
self.fake_get_model_info)
testvol = {'project_id': 'testprjid',
'name': 'testvol',
'volume_type_id': None,
'created_at': timeutils.utcnow()}
- sfv = SolidFireDriver(configuration=self.configuration)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
sfv.create_cloned_volume(testvol_b, testvol)
def test_initialize_connector_with_blocksizes(self):
'created_at': timeutils.utcnow(),
}
- sfv = SolidFireDriver(configuration=self.configuration)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
properties = sfv.initialize_connection(testvol, connector)
self.assertEqual('4096', properties['data']['physical_block_size'])
self.assertEqual('4096', properties['data']['logical_block_size'])
def test_create_volume_with_qos(self):
preset_qos = {}
preset_qos['qos'] = 'fast'
- self.stubs.Set(SolidFireDriver, '_issue_api_request',
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_issue_api_request',
self.fake_issue_api_request)
testvol = {'project_id': 'testprjid',
'volume_type_id': None,
'created_at': timeutils.utcnow()}
- sfv = SolidFireDriver(configuration=self.configuration)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
model_update = sfv.create_volume(testvol)
self.assertIsNotNone(model_update)
def test_create_volume_fails(self):
# NOTE(JDG) This test just fakes update_cluster_status
# this is inentional for this test
- self.stubs.Set(SolidFireDriver, '_update_cluster_status',
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_update_cluster_status',
self.fake_update_cluster_status)
- self.stubs.Set(SolidFireDriver, '_issue_api_request',
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_issue_api_request',
self.fake_issue_api_request_fails)
testvol = {'project_id': 'testprjid',
'name': 'testvol',
'size': 1,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'created_at': timeutils.utcnow()}
- sfv = SolidFireDriver(configuration=self.configuration)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
try:
sfv.create_volume(testvol)
self.fail("Should have thrown Error")
pass
def test_create_sfaccount(self):
- sfv = SolidFireDriver(configuration=self.configuration)
- self.stubs.Set(SolidFireDriver, '_issue_api_request',
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_issue_api_request',
self.fake_issue_api_request)
account = sfv._create_sfaccount('project-id')
self.assertIsNotNone(account)
def test_create_sfaccount_fails(self):
- sfv = SolidFireDriver(configuration=self.configuration)
- self.stubs.Set(SolidFireDriver, '_issue_api_request',
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_issue_api_request',
self.fake_issue_api_request_fails)
account = sfv._create_sfaccount('project-id')
self.assertIsNone(account)
def test_get_sfaccount_by_name(self):
- sfv = SolidFireDriver(configuration=self.configuration)
- self.stubs.Set(SolidFireDriver, '_issue_api_request',
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_issue_api_request',
self.fake_issue_api_request)
account = sfv._get_sfaccount_by_name('some-name')
self.assertIsNotNone(account)
def test_get_sfaccount_by_name_fails(self):
- sfv = SolidFireDriver(configuration=self.configuration)
- self.stubs.Set(SolidFireDriver, '_issue_api_request',
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_issue_api_request',
self.fake_issue_api_request_fails)
account = sfv._get_sfaccount_by_name('some-name')
self.assertIsNone(account)
def test_delete_volume(self):
- self.stubs.Set(SolidFireDriver, '_issue_api_request',
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_issue_api_request',
self.fake_issue_api_request)
testvol = {'project_id': 'testprjid',
'name': 'test_volume',
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'created_at': timeutils.utcnow()}
- sfv = SolidFireDriver(configuration=self.configuration)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
sfv.delete_volume(testvol)
def test_delete_volume_fails_no_volume(self):
- self.stubs.Set(SolidFireDriver, '_issue_api_request',
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_issue_api_request',
self.fake_issue_api_request)
testvol = {'project_id': 'testprjid',
'name': 'no-name',
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'created_at': timeutils.utcnow()}
- sfv = SolidFireDriver(configuration=self.configuration)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
try:
sfv.delete_volume(testvol)
self.fail("Should have thrown Error")
def test_delete_volume_fails_account_lookup(self):
# NOTE(JDG) This test just fakes update_cluster_status
# this is inentional for this test
- self.stubs.Set(SolidFireDriver, '_update_cluster_status',
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_update_cluster_status',
self.fake_update_cluster_status)
- self.stubs.Set(SolidFireDriver, '_issue_api_request',
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_issue_api_request',
self.fake_issue_api_request_fails)
testvol = {'project_id': 'testprjid',
'name': 'no-name',
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'created_at': timeutils.utcnow()}
- sfv = SolidFireDriver(configuration=self.configuration)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
self.assertRaises(exception.SolidFireAccountNotFound,
sfv.delete_volume,
testvol)
def test_get_cluster_info(self):
- self.stubs.Set(SolidFireDriver, '_issue_api_request',
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_issue_api_request',
self.fake_issue_api_request)
- sfv = SolidFireDriver(configuration=self.configuration)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
sfv._get_cluster_info()
def test_get_cluster_info_fail(self):
# NOTE(JDG) This test just fakes update_cluster_status
# this is inentional for this test
- self.stubs.Set(SolidFireDriver, '_update_cluster_status',
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_update_cluster_status',
self.fake_update_cluster_status)
- self.stubs.Set(SolidFireDriver, '_issue_api_request',
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_issue_api_request',
self.fake_issue_api_request_fails)
- sfv = SolidFireDriver(configuration=self.configuration)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
self.assertRaises(exception.SolidFireAPIException,
sfv._get_cluster_info)
def test_extend_volume(self):
- self.stubs.Set(SolidFireDriver, '_issue_api_request',
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_issue_api_request',
self.fake_issue_api_request)
testvol = {'project_id': 'testprjid',
'name': 'test_volume',
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'created_at': timeutils.utcnow()}
- sfv = SolidFireDriver(configuration=self.configuration)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
sfv.extend_volume(testvol, 2)
def test_extend_volume_fails_no_volume(self):
- self.stubs.Set(SolidFireDriver, '_issue_api_request',
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_issue_api_request',
self.fake_issue_api_request)
testvol = {'project_id': 'testprjid',
'name': 'no-name',
'size': 1,
'id': 'not-found'}
- sfv = SolidFireDriver(configuration=self.configuration)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
self.assertRaises(exception.VolumeNotFound,
sfv.extend_volume,
testvol, 2)
def test_extend_volume_fails_account_lookup(self):
# NOTE(JDG) This test just fakes update_cluster_status
# this is intentional for this test
- self.stubs.Set(SolidFireDriver, '_update_cluster_status',
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_update_cluster_status',
self.fake_update_cluster_status)
- self.stubs.Set(SolidFireDriver, '_issue_api_request',
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_issue_api_request',
self.fake_issue_api_request_fails)
testvol = {'project_id': 'testprjid',
'name': 'no-name',
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'created_at': timeutils.utcnow()}
- sfv = SolidFireDriver(configuration=self.configuration)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
self.assertRaises(exception.SolidFireAccountNotFound,
sfv.extend_volume,
testvol, 2)
def test_set_by_qos_spec_with_scoping(self):
- sfv = SolidFireDriver(configuration=self.configuration)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
qos_ref = qos_specs.create(self.ctxt,
'qos-specs-1', {'qos:minIOPS': '1000',
'qos:maxIOPS': '10000',
self.assertEqual(qos, self.expected_qos_results)
def test_set_by_qos_spec(self):
- sfv = SolidFireDriver(configuration=self.configuration)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
qos_ref = qos_specs.create(self.ctxt,
'qos-specs-1', {'minIOPS': '1000',
'maxIOPS': '10000',
self.assertEqual(qos, self.expected_qos_results)
def test_set_by_qos_by_type_only(self):
- sfv = SolidFireDriver(configuration=self.configuration)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
type_ref = volume_types.create(self.ctxt,
"type1", {"qos:minIOPS": "100",
"qos:burstIOPS": "300",
'burstIOPS': 300})
def test_accept_transfer(self):
- sfv = SolidFireDriver(configuration=self.configuration)
- self.stubs.Set(SolidFireDriver, '_issue_api_request',
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_issue_api_request',
self.fake_issue_api_request)
testvol = {'project_id': 'testprjid',
'name': 'test_volume',
expected)
def test_accept_transfer_volume_not_found_raises(self):
- sfv = SolidFireDriver(configuration=self.configuration)
- self.stubs.Set(SolidFireDriver, '_issue_api_request',
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_issue_api_request',
self.fake_issue_api_request)
testvol = {'project_id': 'testprjid',
'name': 'test_volume',
'new_project')
def test_retype(self):
- sfv = SolidFireDriver(configuration=self.configuration)
- self.stubs.Set(SolidFireDriver, '_issue_api_request',
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_issue_api_request',
self.fake_issue_api_request)
type_ref = volume_types.create(self.ctxt,
"type1", {"qos:minIOPS": "500",
def _fake_get_qos_spec(ctxt, spec_id):
return test_qos_spec
- self.stubs.Set(SolidFireDriver, '_issue_api_request',
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_issue_api_request',
self.fake_issue_api_request)
self.stubs.Set(volume_types, 'get_volume_type',
_fake_get_volume_type)
self.stubs.Set(qos_specs, 'get_qos_specs',
_fake_get_qos_spec)
- sfv = SolidFireDriver(configuration=self.configuration)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
diff = {'encryption': {}, 'extra_specs': {},
'qos_specs': {'burstIOPS': ('10000', '2000'),
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'created_at': timeutils.utcnow()}
- sfv = SolidFireDriver(configuration=self.configuration)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
self.assertTrue(sfv.retype(self.ctxt,
testvol,
test_type, diff, host))
def test_update_cluster_status(self):
- self.stubs.Set(SolidFireDriver, '_issue_api_request',
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_issue_api_request',
self.fake_issue_api_request)
- sfv = SolidFireDriver(configuration=self.configuration)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
sfv._update_cluster_status()
self.assertEqual(sfv.cluster_stats['free_capacity_gb'], 99.0)
self.assertEqual(sfv.cluster_stats['total_capacity_gb'], 100.0)
'size': 1,
'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66',
'created_at': timeutils.utcnow()}
- self.stubs.Set(SolidFireDriver, '_issue_api_request',
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_issue_api_request',
self.fake_issue_api_request)
- sfv = SolidFireDriver(configuration=self.configuration)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
model_update = sfv.manage_existing(testvol, external_ref)
self.assertIsNotNone(model_update)
self.assertIsNone(model_update.get('provider_geometry', None))
def _fake_do_v_create(self, project_id, params):
return project_id, params
- self.stubs.Set(SolidFireDriver, '_issue_api_request',
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_issue_api_request',
self.fake_issue_api_request)
- self.stubs.Set(SolidFireDriver, '_do_volume_create', _fake_do_v_create)
+ self.stubs.Set(solidfire.SolidFireDriver,
+ '_do_volume_create', _fake_do_v_create)
testvol = {'project_id': 'testprjid',
'name': 'testvol',
'migration_status': 'target:'
'a720b3c0-d1f0-11e1-9b23-0800200c9a66'}
- sfv = SolidFireDriver(configuration=self.configuration)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
proj_id, sf_vol_object = sfv.create_volume(testvol)
self.assertEqual('a720b3c0-d1f0-11e1-9b23-0800200c9a66',
sf_vol_object['attributes']['uuid'])
self.assertEqual('UUID-a720b3c0-d1f0-11e1-9b23-0800200c9a66',
sf_vol_object['name'])
- @mock.patch.object(SolidFireDriver, '_issue_api_request')
- @mock.patch.object(SolidFireDriver, '_get_sfaccount')
- @mock.patch.object(SolidFireDriver, '_get_sf_volume')
- @mock.patch.object(SolidFireDriver, '_create_image_volume')
+ @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request')
+ @mock.patch.object(solidfire.SolidFireDriver, '_get_sfaccount')
+ @mock.patch.object(solidfire.SolidFireDriver, '_get_sf_volume')
+ @mock.patch.object(solidfire.SolidFireDriver, '_create_image_volume')
def test_verify_image_volume_out_of_date(self,
_mock_create_image_volume,
_mock_get_sf_volume,
325355)}
image_service = 'null'
- sfv = SolidFireDriver(configuration=self.configuration)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
_mock_issue_api_request.return_value = {'result': 'ok'}
sfv._verify_image_volume(self.ctxt, image_meta, image_service)
self.assertTrue(_mock_create_image_volume.called)
- @mock.patch.object(SolidFireDriver, '_issue_api_request')
- @mock.patch.object(SolidFireDriver, '_get_sfaccount')
- @mock.patch.object(SolidFireDriver, '_get_sf_volume')
- @mock.patch.object(SolidFireDriver, '_create_image_volume')
+ @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request')
+ @mock.patch.object(solidfire.SolidFireDriver, '_get_sfaccount')
+ @mock.patch.object(solidfire.SolidFireDriver, '_get_sf_volume')
+ @mock.patch.object(solidfire.SolidFireDriver, '_create_image_volume')
def test_verify_image_volume_ok(self,
_mock_create_image_volume,
_mock_get_sf_volume,
325355)}
image_service = 'null'
- sfv = SolidFireDriver(configuration=self.configuration)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
_mock_issue_api_request.return_value = {'result': 'ok'}
sfv._verify_image_volume(self.ctxt, image_meta, image_service)
self.assertFalse(_mock_create_image_volume.called)
- @mock.patch.object(SolidFireDriver, '_issue_api_request')
+ @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request')
def test_clone_image_not_configured(self, _mock_issue_api_request):
_mock_issue_api_request.return_value = self.mock_stats_data
- sfv = SolidFireDriver(configuration=self.configuration)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
self.assertEqual((None, False),
sfv.clone_image(self.ctxt,
self.mock_volume,
self.fake_image_meta,
'fake'))
- @mock.patch.object(SolidFireDriver, '_issue_api_request')
+ @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request')
def test_clone_image_authorization(self, _mock_issue_api_request):
_mock_issue_api_request.return_value = self.mock_stats_data
self.configuration.sf_allow_template_caching = True
- sfv = SolidFireDriver(configuration=self.configuration)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
# Make sure if it's NOT public and we're NOT the owner it
# doesn't try and cache
self.mock_volume, 'fake',
_fake_image_meta, 'fake')
- @mock.patch.object(SolidFireDriver, '_issue_api_request')
+ @mock.patch.object(solidfire.SolidFireDriver, '_issue_api_request')
def test_clone_image_virt_size_not_set(self, _mock_issue_api_request):
_mock_issue_api_request.return_value = self.mock_stats_data
self.configuration.sf_allow_template_caching = True
- sfv = SolidFireDriver(configuration=self.configuration)
+ sfv = solidfire.SolidFireDriver(configuration=self.configuration)
# Don't run clone_image if virtual_size property not on image
_fake_image_meta = {'id': '17c550bb-a411-44c0-9aaf-0d96dd47f501',
Test suite for VMware VMDK driver.
"""
-from distutils.version import LooseVersion
+from distutils import version as ver
import mock
import mox
def test_get_vc_version(self, session):
# test config overrides fetching from VC server
version = self._driver._get_vc_version()
- self.assertEqual(LooseVersion(self.DEFAULT_VC_VERSION), version)
+ self.assertEqual(ver.LooseVersion(self.DEFAULT_VC_VERSION), version)
# explicitly remove config entry
self._driver.configuration.vmware_host_version = None
session.return_value.vim.service_content.about.version = '6.0.1'
version = self._driver._get_vc_version()
- self.assertEqual(LooseVersion('6.0.1'), version)
+ self.assertEqual(ver.LooseVersion('6.0.1'), version)
@mock.patch('cinder.volume.drivers.vmware.vmdk.VMwareVcVmdkDriver.'
'_get_vc_version')
def test_do_setup_with_pbm_disabled(self, session, get_vc_version):
session_obj = mock.Mock(name='session')
session.return_value = session_obj
- get_vc_version.return_value = LooseVersion('5.0')
+ get_vc_version.return_value = ver.LooseVersion('5.0')
self._driver.do_setup(mock.ANY)
'_get_vc_version')
def test_do_setup_with_invalid_pbm_wsdl(self, get_vc_version,
get_pbm_wsdl_location):
- vc_version = LooseVersion('5.5')
+ vc_version = ver.LooseVersion('5.5')
get_vc_version.return_value = vc_version
get_pbm_wsdl_location.return_value = None
session_obj = mock.Mock(name='session')
session.return_value = session_obj
- vc_version = LooseVersion('5.5')
+ vc_version = ver.LooseVersion('5.5')
get_vc_version.return_value = vc_version
get_pbm_wsdl_location.return_value = 'file:///pbm.wsdl'
import os
import shutil
import socket
-from sys import platform
+import sys
import tempfile
import eventlet
import cinder.policy
from cinder import quota
from cinder import test
-from cinder.tests.brick.fake_lvm import FakeBrickLVM
+from cinder.tests.brick import fake_lvm
from cinder.tests import conf_fixture
from cinder.tests import fake_driver
from cinder.tests import fake_notifier
from cinder.volume import configuration as conf
from cinder.volume import driver
from cinder.volume.drivers import lvm
-from cinder.volume.manager import VolumeManager
+from cinder.volume import manager as vol_manager
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume.targets import tgt
from cinder.volume import utils as volutils
CONF = cfg.CONF
ENCRYPTION_PROVIDER = 'nova.volume.encryptors.cryptsetup.CryptsetupEncryptor'
-PLATFORM = platform
+PLATFORM = sys.platform
FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaa'
with mock.patch.object(jsonutils, 'loads') as mock_loads:
mock_loads.return_value = fake_capabilities
- manager = VolumeManager()
+ manager = vol_manager.VolumeManager()
manager.stats = {'pools': {}}
manager.driver.set_initialized()
manager.publish_service_capabilities(self.context)
def test_extra_capabilities_fail(self):
with mock.patch.object(jsonutils, 'loads') as mock_loads:
mock_loads.side_effect = exception.CinderException('test')
- self.assertRaises(exception.CinderException, VolumeManager)
+ self.assertRaises(exception.CinderException,
+ vol_manager.VolumeManager)
def test_delete_busy_volume(self):
"""Test volume survives deletion if driver reports it as busy."""
def test_delete_busy_snapshot(self):
"""Test snapshot can be created and deleted."""
- self.volume.driver.vg = FakeBrickLVM('cinder-volumes',
- False,
- None,
- 'default')
+ self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
+ False,
+ None,
+ 'default')
volume = tests_utils.create_volume(self.context, **self.volume_params)
volume_id = volume['id']
self.volume.delete_snapshot(self.context, snapshot_id)
self.volume.delete_volume(self.context, volume_id)
- @test.testtools.skipIf(platform == "darwin", "SKIP on OSX")
+ @test.testtools.skipIf(sys.platform == "darwin", "SKIP on OSX")
def test_delete_no_dev_fails(self):
"""Test delete snapshot with no dev file fails."""
self.stubs.Set(os.path, 'exists', lambda x: False)
- self.volume.driver.vg = FakeBrickLVM('cinder-volumes',
- False,
- None,
- 'default')
+ self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
+ False,
+ None,
+ 'default')
volume = tests_utils.create_volume(self.context, **self.volume_params)
volume_id = volume['id']
self.stubs.Set(self.volume.driver, '_delete_volume',
lambda x: False)
- self.volume.driver.vg = FakeBrickLVM('cinder-volumes',
- False,
- None,
- 'default')
+ self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
+ False,
+ None,
+ 'default')
self.stubs.Set(self.volume.driver.vg, 'lv_has_snapshot',
lambda x: True)
'cinder-volumes:default:0' % hostname}
host = {'capabilities': capabilities}
vol = {'name': 'test', 'id': 1, 'size': 1, 'status': 'available'}
- self.volume.driver.vg = FakeBrickLVM('cinder-volumes',
- False,
- None,
- 'default')
+ self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
+ False,
+ None,
+ 'default')
self.assertRaises(exception.VolumeBackendAPIException,
self.volume.driver.migrate_volume, self.context,
self.stubs.Set(volutils, 'get_all_volume_groups',
get_all_volume_groups)
- self.volume.driver.vg = FakeBrickLVM('cinder-volumes',
- False,
- None,
- 'default')
+ self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
+ False,
+ None,
+ 'default')
moved, model_update = self.volume.driver.migrate_volume(self.context,
vol, host)
self.stubs.Set(self.volume.driver, 'create_export',
lambda x, y, vg='vg': None)
- self.volume.driver.vg = FakeBrickLVM('cinder-volumes',
- False,
- None,
- 'default')
+ self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
+ False,
+ None,
+ 'default')
moved, model_update = self.volume.driver.migrate_volume(self.context,
vol, host)
self.assertEqual(moved, True)
def _setup_stubs_for_manage_existing(self):
"""Helper to set up common stubs for the manage_existing tests."""
- self.volume.driver.vg = FakeBrickLVM('cinder-volumes',
- False,
- None,
- 'default')
+ self.volume.driver.vg = fake_lvm.FakeBrickLVM('cinder-volumes',
+ False,
+ None,
+ 'default')
self.stubs.Set(self.volume.driver.vg, 'get_volume',
self._get_manage_existing_lvs)
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
-from cinder.volume.drivers.zadara import zadara_opts
-from cinder.volume.drivers.zadara import ZadaraVPSAISCSIDriver
+from cinder.volume.drivers import zadara
LOG = logging.getLogger("cinder.volume.driver")
RUNTIME_VARS = copy.deepcopy(DEFAULT_RUNTIME_VARS)
self.configuration = conf.Configuration(None)
- self.configuration.append_config_values(zadara_opts)
+ self.configuration.append_config_values(zadara.zadara_opts)
self.configuration.reserved_percentage = 10
self.configuration.zadara_user = 'test'
self.configuration.zadara_password = 'test_password'
self.configuration.zadara_vpsa_poolname = 'pool-0001'
- self.driver = ZadaraVPSAISCSIDriver(configuration=self.configuration)
+ self.driver = zadara.ZadaraVPSAISCSIDriver(
+ configuration=self.configuration)
self.stubs.Set(httplib, 'HTTPConnection', FakeHTTPConnection)
self.stubs.Set(httplib, 'HTTPSConnection', FakeHTTPSConnection)
self.driver.do_setup(None)
Unit tests for Oracle's ZFSSA Cinder volume driver
"""
-import mock
-
-from json import JSONEncoder
+import json
+import mock
from oslo_utils import units
from cinder.openstack.common import log as logging
def get(self, path, **kwargs):
result = client.RestResult()
result.status = client.Status.OK
- result.data = JSONEncoder().encode({'group':
- {'initiators':
- ['iqn.1-0.org.deb:01:d7']}})
+ result.data = json.JSONEncoder().encode({'group':
+ {'initiators':
+ ['iqn.1-0.org.deb:01:d7']}})
return result
def put(self, path, body="", **kwargs):
from cinder.i18n import _
from cinder import test
-from cinder.volume.drivers.netapp.dataontap.client.api import NaElement
-from cinder.volume.drivers.netapp.dataontap.client.api import NaServer
+from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
class NetAppApiElementTransTests(test.TestCase):
def test_translate_struct_dict_unique_key(self):
"""Tests if dict gets properly converted to NaElements."""
- root = NaElement('root')
+ root = netapp_api.NaElement('root')
child = {'e1': 'v1', 'e2': 'v2', 'e3': 'v3'}
root.translate_struct(child)
self.assertEqual(len(root.get_children()), 3)
def test_translate_struct_dict_nonunique_key(self):
"""Tests if list/dict gets properly converted to NaElements."""
- root = NaElement('root')
+ root = netapp_api.NaElement('root')
child = [{'e1': 'v1', 'e2': 'v2'}, {'e1': 'v3'}]
root.translate_struct(child)
self.assertEqual(len(root.get_children()), 3)
def test_translate_struct_list(self):
"""Tests if list gets properly converted to NaElements."""
- root = NaElement('root')
+ root = netapp_api.NaElement('root')
child = ['e1', 'e2']
root.translate_struct(child)
self.assertEqual(len(root.get_children()), 2)
def test_translate_struct_tuple(self):
"""Tests if tuple gets properly converted to NaElements."""
- root = NaElement('root')
+ root = netapp_api.NaElement('root')
child = ('e1', 'e2')
root.translate_struct(child)
self.assertEqual(len(root.get_children()), 2)
def test_translate_invalid_struct(self):
"""Tests if invalid data structure raises exception."""
- root = NaElement('root')
+ root = netapp_api.NaElement('root')
child = 'random child element'
self.assertRaises(ValueError, root.translate_struct, child)
def test_setter_builtin_types(self):
"""Tests str, int, float get converted to NaElement."""
- root = NaElement('root')
+ root = netapp_api.NaElement('root')
root['e1'] = 'v1'
root['e2'] = 1
root['e3'] = 2.0
def test_setter_na_element(self):
"""Tests na_element gets appended as child."""
- root = NaElement('root')
- root['e1'] = NaElement('nested')
+ root = netapp_api.NaElement('root')
+ root['e1'] = netapp_api.NaElement('nested')
self.assertEqual(len(root.get_children()), 1)
e1 = root.get_child_by_name('e1')
- self.assertIsInstance(e1, NaElement)
- self.assertIsInstance(e1.get_child_by_name('nested'), NaElement)
+ self.assertIsInstance(e1, netapp_api.NaElement)
+ self.assertIsInstance(e1.get_child_by_name('nested'),
+ netapp_api.NaElement)
def test_setter_child_dict(self):
"""Tests dict is appended as child to root."""
- root = NaElement('root')
+ root = netapp_api.NaElement('root')
root['d'] = {'e1': 'v1', 'e2': 'v2'}
e1 = root.get_child_by_name('d')
- self.assertIsInstance(e1, NaElement)
+ self.assertIsInstance(e1, netapp_api.NaElement)
sub_ch = e1.get_children()
self.assertEqual(len(sub_ch), 2)
for c in sub_ch:
def test_setter_child_list_tuple(self):
"""Tests list/tuple are appended as child to root."""
- root = NaElement('root')
+ root = netapp_api.NaElement('root')
root['l'] = ['l1', 'l2']
root['t'] = ('t1', 't2')
l = root.get_child_by_name('l')
- self.assertIsInstance(l, NaElement)
+ self.assertIsInstance(l, netapp_api.NaElement)
t = root.get_child_by_name('t')
- self.assertIsInstance(t, NaElement)
+ self.assertIsInstance(t, netapp_api.NaElement)
for le in l.get_children():
self.assertIn(le.get_name(), ['l1', 'l2'])
for te in t.get_children():
def test_setter_no_value(self):
"""Tests key with None value."""
- root = NaElement('root')
+ root = netapp_api.NaElement('root')
root['k'] = None
self.assertIsNone(root.get_child_content('k'))
def test_setter_invalid_value(self):
"""Tests invalid value raises exception."""
- root = NaElement('root')
+ root = netapp_api.NaElement('root')
try:
- root['k'] = NaServer('localhost')
+ root['k'] = netapp_api.NaServer('localhost')
except Exception as e:
if not isinstance(e, TypeError):
self.fail(_('Error not a TypeError.'))
def test_setter_invalid_key(self):
"""Tests invalid value raises exception."""
- root = NaElement('root')
+ root = netapp_api.NaElement('root')
try:
root[None] = 'value'
except Exception as e:
from cinder.tests.volume.drivers.netapp.dataontap import fakes as fake
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_7mode
-from cinder.volume.drivers.netapp.utils import hashabledict
+from cinder.volume.drivers.netapp import utils as netapp_utils
CONNECTION_INFO = {'hostname': 'hostname',
'transport_type': 'https',
igroups = self.client.get_igroup_by_initiators(initiators)
# make these lists of dicts comparable using hashable dictionaries
- igroups = set([hashabledict(igroup) for igroup in igroups])
- expected = set([hashabledict(fake.IGROUP1)])
+ igroups = set(
+ [netapp_utils.hashabledict(igroup) for igroup in igroups])
+ expected = set([netapp_utils.hashabledict(fake.IGROUP1)])
self.assertSetEqual(igroups, expected)
igroups = self.client.get_igroup_by_initiators(initiators)
# make these lists of dicts comparable using hashable dictionaries
- igroups = set([hashabledict(igroup) for igroup in igroups])
- expected = set([hashabledict(fake.IGROUP1)])
+ igroups = set(
+ [netapp_utils.hashabledict(igroup) for igroup in igroups])
+ expected = set([netapp_utils.hashabledict(fake.IGROUP1)])
self.assertSetEqual(igroups, expected)
from cinder import test
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
-from cinder.volume.drivers.netapp.utils import hashabledict
+from cinder.volume.drivers.netapp import utils as netapp_utils
CONNECTION_INFO = {'hostname': 'hostname',
igroups = self.client.get_igroup_by_initiators(initiators)
# make these lists of dicts comparable using hashable dictionaries
- igroups = set([hashabledict(igroup) for igroup in igroups])
- expected = set([hashabledict(expected_igroup)])
+ igroups = set(
+ [netapp_utils.hashabledict(igroup) for igroup in igroups])
+ expected = set([netapp_utils.hashabledict(expected_igroup)])
self.assertSetEqual(igroups, expected)
igroups = self.client.get_igroup_by_initiators(initiators)
# make these lists of dicts comparable using hashable dictionaries
- igroups = set([hashabledict(igroup) for igroup in igroups])
- expected = set([hashabledict(expected_igroup)])
+ igroups = set(
+ [netapp_utils.hashabledict(igroup) for igroup in igroups])
+ expected = set([netapp_utils.hashabledict(expected_igroup)])
self.assertSetEqual(igroups, expected)
igroups = self.client.get_igroup_by_initiators([initiator])
# make these lists of dicts comparable using hashable dictionaries
- igroups = set([hashabledict(igroup) for igroup in igroups])
- expected = set([hashabledict(expected_igroup1),
- hashabledict(expected_igroup2)])
+ igroups = set(
+ [netapp_utils.hashabledict(igroup) for igroup in igroups])
+ expected = set([netapp_utils.hashabledict(expected_igroup1),
+ netapp_utils.hashabledict(expected_igroup2)])
self.assertSetEqual(igroups, expected)
import cinder.tests.volume.drivers.netapp.dataontap.fakes as fake
import cinder.tests.volume.drivers.netapp.fakes as na_fakes
from cinder.volume.drivers.netapp.dataontap import block_7mode
-from cinder.volume.drivers.netapp.dataontap.block_7mode import \
- NetAppBlockStorage7modeLibrary as block_lib_7mode
from cinder.volume.drivers.netapp.dataontap import block_base
-from cinder.volume.drivers.netapp.dataontap.block_base import \
- NetAppBlockStorageLibrary as block_lib
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
-from cinder.volume.drivers.netapp.dataontap.client.api import NaApiError
from cinder.volume.drivers.netapp.dataontap.client import client_base
from cinder.volume.drivers.netapp import utils as na_utils
super(NetAppBlockStorage7modeLibraryTestCase, self).setUp()
kwargs = {'configuration': self.get_config_7mode()}
- self.library = block_lib_7mode('driver', 'protocol', **kwargs)
+ self.library = block_7mode.NetAppBlockStorage7modeLibrary(
+ 'driver', 'protocol', **kwargs)
self.library.zapi_client = mock.Mock()
self.zapi_client = self.library.zapi_client
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.MagicMock(return_value=(1, 20)))
- @mock.patch.object(block_lib_7mode, '_get_root_volume_name')
- @mock.patch.object(block_lib_7mode, '_do_partner_setup')
- @mock.patch.object(block_lib, 'do_setup')
+ @mock.patch.object(block_7mode.NetAppBlockStorage7modeLibrary,
+ '_get_root_volume_name')
+ @mock.patch.object(block_7mode.NetAppBlockStorage7modeLibrary,
+ '_do_partner_setup')
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary, 'do_setup')
def test_do_setup(self, super_do_setup, mock_do_partner_setup,
mock_get_root_volume_name):
mock_get_root_volume_name.return_value = 'vol0'
self.assertFalse(hasattr(self.library, 'partner_zapi_client'))
- @mock.patch.object(block_lib, 'check_for_setup_error')
+ @mock.patch.object(
+ block_base.NetAppBlockStorageLibrary, 'check_for_setup_error')
def test_check_for_setup_error(self, super_check_for_setup_error):
self.zapi_client.get_ontapi_version.return_value = (1, 9)
self.assertIsNone(lun_id)
def test_find_mapped_lun_igroup_raises(self):
- self.zapi_client.get_lun_map.side_effect = NaApiError
+ self.zapi_client.get_lun_map.side_effect = netapp_api.NaApiError
initiators = fake.FC_FORMATTED_INITIATORS
- self.assertRaises(NaApiError,
+ self.assertRaises(netapp_api.NaApiError,
self.library._find_mapped_lun_igroup,
'path',
initiators)
from cinder import test
from cinder.tests.volume.drivers.netapp.dataontap import fakes as fake
from cinder.volume.drivers.netapp.dataontap import block_base
-from cinder.volume.drivers.netapp.dataontap.block_base import \
- NetAppBlockStorageLibrary as block_lib
-from cinder.volume.drivers.netapp.dataontap.client.api import NaApiError
+from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp import utils as na_utils
super(NetAppBlockStorageLibraryTestCase, self).setUp()
kwargs = {'configuration': mock.Mock()}
- self.library = block_lib('driver', 'protocol', **kwargs)
+ self.library = block_base.NetAppBlockStorageLibrary(
+ 'driver', 'protocol', **kwargs)
self.library.zapi_client = mock.Mock()
self.zapi_client = self.library.zapi_client
self.mock_request = mock.Mock()
def tearDown(self):
super(NetAppBlockStorageLibraryTestCase, self).tearDown()
- @mock.patch.object(block_lib, '_get_lun_attr',
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_get_lun_attr',
mock.Mock(return_value={'Volume': 'vol1'}))
def test_get_pool(self):
pool = self.library.get_pool({'name': 'volume-fake-uuid'})
self.assertEqual(pool, 'vol1')
- @mock.patch.object(block_lib, '_get_lun_attr',
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_get_lun_attr',
mock.Mock(return_value=None))
def test_get_pool_no_metadata(self):
pool = self.library.get_pool({'name': 'volume-fake-uuid'})
self.assertEqual(pool, None)
- @mock.patch.object(block_lib, '_get_lun_attr',
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_get_lun_attr',
mock.Mock(return_value=dict()))
def test_get_pool_volume_unknown(self):
pool = self.library.get_pool({'name': 'volume-fake-uuid'})
self.assertEqual(pool, None)
- @mock.patch.object(block_lib, '_create_lun', mock.Mock())
- @mock.patch.object(block_lib, '_create_lun_handle', mock.Mock())
- @mock.patch.object(block_lib, '_add_lun_to_table', mock.Mock())
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_create_lun', mock.Mock())
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_create_lun_handle', mock.Mock())
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_add_lun_to_table', mock.Mock())
@mock.patch.object(na_utils, 'get_volume_extra_specs',
mock.Mock(return_value=None))
@mock.patch.object(block_base, 'LOG', mock.Mock())
'id': uuid.uuid4(),
'host': 'hostname@backend'}) # missing pool
- @mock.patch.object(block_lib, '_get_lun_attr')
- @mock.patch.object(block_lib, '_get_or_create_igroup')
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_get_lun_attr')
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_get_or_create_igroup')
def test_map_lun(self, mock_get_or_create_igroup, mock_get_lun_attr):
os = 'linux'
protocol = 'fcp'
self.zapi_client.map_lun.assert_called_once_with(
fake.LUN1, fake.IGROUP1_NAME, lun_id=None)
- @mock.patch.object(block_lib, '_get_lun_attr')
- @mock.patch.object(block_lib, '_get_or_create_igroup')
- @mock.patch.object(block_lib, '_find_mapped_lun_igroup')
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_get_lun_attr')
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_get_or_create_igroup')
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_find_mapped_lun_igroup')
def test_map_lun_preexisting(self, mock_find_mapped_lun_igroup,
mock_get_or_create_igroup, mock_get_lun_attr):
os = 'linux'
mock_get_lun_attr.return_value = {'Path': fake.LUN1, 'OsType': os}
mock_get_or_create_igroup.return_value = fake.IGROUP1_NAME
mock_find_mapped_lun_igroup.return_value = (fake.IGROUP1_NAME, '2')
- self.zapi_client.map_lun.side_effect = NaApiError
+ self.zapi_client.map_lun.side_effect = netapp_api.NaApiError
lun_id = self.library._map_lun(
'fake_volume', fake.FC_FORMATTED_INITIATORS, protocol, None)
mock_find_mapped_lun_igroup.assert_called_once_with(
fake.LUN1, fake.FC_FORMATTED_INITIATORS)
- @mock.patch.object(block_lib, '_get_lun_attr')
- @mock.patch.object(block_lib, '_get_or_create_igroup')
- @mock.patch.object(block_lib, '_find_mapped_lun_igroup')
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_get_lun_attr')
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_get_or_create_igroup')
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_find_mapped_lun_igroup')
def test_map_lun_api_error(self, mock_find_mapped_lun_igroup,
mock_get_or_create_igroup, mock_get_lun_attr):
os = 'linux'
mock_get_lun_attr.return_value = {'Path': fake.LUN1, 'OsType': os}
mock_get_or_create_igroup.return_value = fake.IGROUP1_NAME
mock_find_mapped_lun_igroup.return_value = (None, None)
- self.zapi_client.map_lun.side_effect = NaApiError
+ self.zapi_client.map_lun.side_effect = netapp_api.NaApiError
- self.assertRaises(NaApiError, self.library._map_lun, 'fake_volume',
- fake.FC_FORMATTED_INITIATORS, protocol, None)
+ self.assertRaises(netapp_api.NaApiError, self.library._map_lun,
+ 'fake_volume', fake.FC_FORMATTED_INITIATORS,
+ protocol, None)
- @mock.patch.object(block_lib, '_find_mapped_lun_igroup')
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_find_mapped_lun_igroup')
def test_unmap_lun(self, mock_find_mapped_lun_igroup):
mock_find_mapped_lun_igroup.return_value = (fake.IGROUP1_NAME, 1)
self.assertRaises(NotImplementedError,
self.library._get_fc_target_wwpns)
- @mock.patch.object(block_lib, '_build_initiator_target_map')
- @mock.patch.object(block_lib, '_map_lun')
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_build_initiator_target_map')
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_map_lun')
def test_initialize_connection_fc(self, mock_map_lun,
mock_build_initiator_target_map):
self.maxDiff = None
mock_map_lun.assert_called_once_with(
'fake_volume', fake.FC_FORMATTED_INITIATORS, 'fcp', None)
- @mock.patch.object(block_lib, '_build_initiator_target_map')
- @mock.patch.object(block_lib, '_map_lun')
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_build_initiator_target_map')
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_map_lun')
def test_initialize_connection_fc_no_wwpns(
self, mock_map_lun, mock_build_initiator_target_map):
fake.FC_VOLUME,
fake.FC_CONNECTOR)
- @mock.patch.object(block_lib, '_has_luns_mapped_to_initiators')
- @mock.patch.object(block_lib, '_unmap_lun')
- @mock.patch.object(block_lib, '_get_lun_attr')
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_has_luns_mapped_to_initiators')
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_unmap_lun')
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_get_lun_attr')
def test_terminate_connection_fc(self, mock_get_lun_attr, mock_unmap_lun,
mock_has_luns_mapped_to_initiators):
mock_unmap_lun.assert_called_once_with(fake.LUN1,
fake.FC_FORMATTED_INITIATORS)
- @mock.patch.object(block_lib, '_build_initiator_target_map')
- @mock.patch.object(block_lib, '_has_luns_mapped_to_initiators')
- @mock.patch.object(block_lib, '_unmap_lun')
- @mock.patch.object(block_lib, '_get_lun_attr')
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_build_initiator_target_map')
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_has_luns_mapped_to_initiators')
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_unmap_lun')
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_get_lun_attr')
def test_terminate_connection_fc_no_more_luns(
self, mock_get_lun_attr, mock_unmap_lun,
mock_has_luns_mapped_to_initiators,
self.assertDictEqual(target_info, fake.FC_TARGET_INFO_UNMAP)
- @mock.patch.object(block_lib, '_get_fc_target_wwpns')
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_get_fc_target_wwpns')
def test_build_initiator_target_map_no_lookup_service(
self, mock_get_fc_target_wwpns):
self.assertDictEqual(fake.FC_I_T_MAP_COMPLETE, init_targ_map)
self.assertEqual(0, num_paths)
- @mock.patch.object(block_lib, '_get_fc_target_wwpns')
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_get_fc_target_wwpns')
def test_build_initiator_target_map_with_lookup_service(
self, mock_get_fc_target_wwpns):
self.assertDictEqual(fake.FC_I_T_MAP, init_targ_map)
self.assertEqual(4, num_paths)
- @mock.patch.object(block_lib, '_create_lun', mock.Mock())
- @mock.patch.object(block_lib, '_create_lun_handle', mock.Mock())
- @mock.patch.object(block_lib, '_add_lun_to_table', mock.Mock())
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_create_lun', mock.Mock())
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_create_lun_handle', mock.Mock())
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_add_lun_to_table', mock.Mock())
@mock.patch.object(na_utils, 'LOG', mock.Mock())
@mock.patch.object(na_utils, 'get_volume_extra_specs',
mock.Mock(return_value={'netapp:raid_type': 'raid4'}))
'Use netapp_raid_type instead.'
na_utils.LOG.warning.assert_called_once_with(warn_msg)
- @mock.patch.object(block_lib, '_create_lun', mock.Mock())
- @mock.patch.object(block_lib, '_create_lun_handle', mock.Mock())
- @mock.patch.object(block_lib, '_add_lun_to_table', mock.Mock())
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_create_lun', mock.Mock())
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_create_lun_handle', mock.Mock())
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_add_lun_to_table', mock.Mock())
@mock.patch.object(na_utils, 'LOG', mock.Mock())
@mock.patch.object(na_utils, 'get_volume_extra_specs',
mock.Mock(return_value={'netapp_thick_provisioned':
'source-name': 'lun_path'})
self.assertEqual(1, self.zapi_client.get_lun_by_args.call_count)
- @mock.patch.object(block_lib, '_extract_lun_info',
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_extract_lun_info',
mock.Mock(return_value=block_base.NetAppLun(
'lun0', 'lun0', '3', {'UUID': 'src_id'})))
def test_get_existing_vol_manage_lun(self):
self.library._extract_lun_info.assert_called_once_with('lun0')
self.assertEqual('lun0', lun.name)
- @mock.patch.object(block_lib, '_get_existing_vol_with_manage_ref',
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ '_get_existing_vol_with_manage_ref',
mock.Mock(return_value=block_base.NetAppLun(
'handle', 'name', '1073742824', {})))
def test_manage_existing_get_size(self):
import cinder.tests.volume.drivers.netapp.dataontap.fakes as fake
import cinder.tests.volume.drivers.netapp.fakes as na_fakes
from cinder.volume.drivers.netapp.dataontap import block_base
-from cinder.volume.drivers.netapp.dataontap.block_base import \
- NetAppBlockStorageLibrary as block_lib
from cinder.volume.drivers.netapp.dataontap import block_cmode
-from cinder.volume.drivers.netapp.dataontap.block_cmode import \
- NetAppBlockStorageCmodeLibrary as block_lib_cmode
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
from cinder.volume.drivers.netapp.dataontap.client import client_base
from cinder.volume.drivers.netapp.dataontap import ssc_cmode
super(NetAppBlockStorageCmodeLibraryTestCase, self).setUp()
kwargs = {'configuration': self.get_config_cmode()}
- self.library = block_lib_cmode('driver', 'protocol', **kwargs)
+ self.library = block_cmode.NetAppBlockStorageCmodeLibrary(
+ 'driver', 'protocol', **kwargs)
self.library.zapi_client = mock.Mock()
self.zapi_client = self.library.zapi_client
@mock.patch.object(client_base.Client, 'get_ontapi_version',
mock.MagicMock(return_value=(1, 20)))
@mock.patch.object(na_utils, 'check_flags')
- @mock.patch.object(block_lib, 'do_setup')
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary, 'do_setup')
def test_do_setup(self, super_do_setup, mock_check_flags):
context = mock.Mock()
super_do_setup.assert_called_once_with(context)
self.assertEqual(1, mock_check_flags.call_count)
- @mock.patch.object(block_lib, 'check_for_setup_error')
+ @mock.patch.object(block_base.NetAppBlockStorageLibrary,
+ 'check_for_setup_error')
@mock.patch.object(ssc_cmode, 'check_ssc_api_permissions')
def test_check_for_setup_error(self, mock_check_ssc_api_permissions,
super_check_for_setup_error):
from cinder import utils
from cinder.volume.drivers.netapp.dataontap import nfs_base
from cinder.volume.drivers.netapp import utils as na_utils
-from cinder.volume.drivers.nfs import NfsDriver as nfs_lib
+from cinder.volume.drivers import nfs
class NetAppNfsDriverTestCase(test.TestCase):
return_value=mock.Mock()):
self.driver = nfs_base.NetAppNfsDriver(**kwargs)
- @mock.patch.object(nfs_lib, 'do_setup')
+ @mock.patch.object(nfs.NfsDriver, 'do_setup')
@mock.patch.object(na_utils, 'check_flags')
def test_do_setup(self, mock_check_flags, mock_super_do_setup):
self.driver.do_setup(mock.Mock())
from cinder.volume.drivers.netapp.dataontap.client import client_cmode
from cinder.volume.drivers.netapp.dataontap import nfs_cmode
from cinder.volume.drivers.netapp import utils as na_utils
-from cinder.volume.drivers.nfs import NfsDriver as nfs_lib
+from cinder.volume.drivers import nfs
class NetAppCmodeNfsDriverTestCase(test.TestCase):
return config
@mock.patch.object(client_cmode, 'Client', mock.Mock())
- @mock.patch.object(nfs_lib, 'do_setup')
+ @mock.patch.object(nfs.NfsDriver, 'do_setup')
@mock.patch.object(na_utils, 'check_flags')
def test_do_setup(self, mock_check_flags, mock_super_do_setup):
self.driver.do_setup(mock.Mock())
fake_ctypes.c_ulong = lambda x: x
mock.patch.multiple(
- 'cinder.volume.drivers.windows.vhdutils', ctypes=fake_ctypes,
- windll=mock.DEFAULT, wintypes=mock.DEFAULT, kernel32=mock.DEFAULT,
+ 'cinder.volume.drivers.windows.vhdutils',
+ ctypes=fake_ctypes, kernel32=mock.DEFAULT,
virtdisk=mock.DEFAULT, Win32_GUID=mock.DEFAULT,
Win32_RESIZE_VIRTUAL_DISK_PARAMETERS=mock.DEFAULT,
Win32_CREATE_VIRTUAL_DISK_PARAMETERS=mock.DEFAULT,
vhdutils.VIRTUAL_DISK_ACCESS_NONE, None,
vhdutils.CREATE_VIRTUAL_DISK_FLAG_NONE, 0,
vhdutils.ctypes.byref(fake_params), None,
- vhdutils.ctypes.byref(vhdutils.wintypes.HANDLE()))
+ vhdutils.ctypes.byref(vhdutils.ctypes.wintypes.HANDLE()))
self.assertTrue(self._vhdutils._close.called)
def test_create_vhd_exception(self):
vhdutils.ctypes.byref(fake_vst),
vhdutils.ctypes.c_wchar_p(self._FAKE_VHD_PATH),
fake_access_mask, fake_open_flag, fake_params,
- vhdutils.ctypes.byref(vhdutils.wintypes.HANDLE()))
+ vhdutils.ctypes.byref(vhdutils.ctypes.wintypes.HANDLE()))
self.assertEqual(fake_device_id, fake_vst.DeviceId)
fake_info_member = vhdutils.GET_VIRTUAL_DISK_INFO_SIZE
fake_info = mock.Mock()
fake_info.VhdInfo.Size._fields_ = [
- ("VirtualSize", vhdutils.wintypes.ULARGE_INTEGER),
- ("PhysicalSize", vhdutils.wintypes.ULARGE_INTEGER)]
+ ("VirtualSize", vhdutils.ctypes.wintypes.ULARGE_INTEGER),
+ ("PhysicalSize", vhdutils.ctypes.wintypes.ULARGE_INTEGER)]
fake_info.VhdInfo.Size.VirtualSize = self._FAKE_VHD_SIZE
fake_info.VhdInfo.Size.PhysicalSize = fake_physical_size
"""Unit tests for brcd fc zone client cli."""
import mock
-from mock import patch
from oslo_concurrency import processutils
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
-from cinder.zonemanager.drivers.brocade.brcd_fc_zone_client_cli \
- import BrcdFCZoneClientCLI
+from cinder.zonemanager.drivers.brocade \
+ import brcd_fc_zone_client_cli as client_cli
import cinder.zonemanager.drivers.brocade.fc_zone_constants as ZoneConstant
LOG = logging.getLogger(__name__)
unsupported_firmware = ['Fabric OS: v6.2.1']
-class TestBrcdFCZoneClientCLI(BrcdFCZoneClientCLI, test.TestCase):
+class TestBrcdFCZoneClientCLI(client_cli.BrcdFCZoneClientCLI, test.TestCase):
def setUp(self):
super(TestBrcdFCZoneClientCLI, self).setUp()
def __init__(self, *args, **kwargs):
test.TestCase.__init__(self, *args, **kwargs)
- @patch.object(BrcdFCZoneClientCLI, '_get_switch_info')
+ @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_get_switch_info')
def test_get_active_zone_set(self, get_switch_info_mock):
cmd_list = [ZoneConstant.GET_ACTIVE_ZONE_CFG]
get_switch_info_mock.return_value = cfgactvshow
get_switch_info_mock.assert_called_once_with(cmd_list)
self.assertDictMatch(active_zoneset_returned, active_zoneset)
- @patch.object(BrcdFCZoneClientCLI, '_run_ssh')
+ @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
def test_get_active_zone_set_ssh_error(self, run_ssh_mock):
run_ssh_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(exception.BrocadeZoningCliException,
self.get_active_zone_set)
- @mock.patch.object(BrcdFCZoneClientCLI, 'get_active_zone_set')
- @mock.patch.object(BrcdFCZoneClientCLI, 'apply_zone_change')
- @mock.patch.object(BrcdFCZoneClientCLI, '_cfg_save')
+ @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set')
+ @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
+ @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_cfg_save')
def test_add_zones_new_zone_no_activate(self, cfg_save_mock,
apply_zone_change_mock,
get_active_zs_mock):
self.assertEqual(3, apply_zone_change_mock.call_count)
cfg_save_mock.assert_called_once_with()
- @mock.patch.object(BrcdFCZoneClientCLI, 'get_active_zone_set')
- @mock.patch.object(BrcdFCZoneClientCLI, 'apply_zone_change')
- @mock.patch.object(BrcdFCZoneClientCLI, 'activate_zoneset')
+ @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'get_active_zone_set')
+ @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
+ @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset')
def test_add_zones_new_zone_activate(self, activate_zoneset_mock,
apply_zone_change_mock,
get_active_zs_mock):
activate_zoneset_mock.assert_called_once_with(
active_zoneset['active_zone_config'])
- @mock.patch.object(BrcdFCZoneClientCLI, '_ssh_execute')
+ @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_ssh_execute')
def test_activate_zoneset(self, ssh_execute_mock):
ssh_execute_mock.return_value = True
return_value = self.activate_zoneset('zoneset1')
self.assertTrue(return_value)
- @mock.patch.object(BrcdFCZoneClientCLI, '_ssh_execute')
+ @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_ssh_execute')
def test_deactivate_zoneset(self, ssh_execute_mock):
ssh_execute_mock.return_value = True
return_value = self.deactivate_zoneset()
self.assertTrue(return_value)
- @mock.patch.object(BrcdFCZoneClientCLI, 'apply_zone_change')
- @mock.patch.object(BrcdFCZoneClientCLI, '_cfg_save')
+ @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
+ @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_cfg_save')
def test_delete_zones_activate_false(self, cfg_save_mock,
apply_zone_change_mock):
with mock.patch.object(self, '_zone_delete') as zone_delete_mock:
zone_delete_mock.assert_called_once_with(zone_names_to_delete)
cfg_save_mock.assert_called_once_with()
- @patch.object(BrcdFCZoneClientCLI, 'apply_zone_change')
- @patch.object(BrcdFCZoneClientCLI, 'activate_zoneset')
+ @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
+ @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'activate_zoneset')
def test_delete_zones_activate_true(self, activate_zs_mock,
apply_zone_change_mock):
with mock.patch.object(self, '_zone_delete') \
activate_zs_mock.assert_called_once_with(
active_zoneset['active_zone_config'])
- @patch.object(BrcdFCZoneClientCLI, '_get_switch_info')
+ @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_get_switch_info')
def test_get_nameserver_info(self, get_switch_info_mock):
ns_info_list = []
ns_info_list_expected = ['20:1a:00:05:1e:e8:e3:29']
ns_info_list = self.get_nameserver_info()
self.assertEqual(ns_info_list, ns_info_list_expected)
- @patch.object(BrcdFCZoneClientCLI, '_run_ssh')
+ @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
def test_get_nameserver_info_ssh_error(self, run_ssh_mock):
run_ssh_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(exception.BrocadeZoningCliException,
self.get_nameserver_info)
- @patch.object(BrcdFCZoneClientCLI, '_ssh_execute')
+ @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_ssh_execute')
def test__cfg_save(self, ssh_execute_mock):
cmd_list = [ZoneConstant.CFG_SAVE]
self._cfg_save()
ssh_execute_mock.assert_called_once_with(cmd_list, True, 1)
- @patch.object(BrcdFCZoneClientCLI, 'apply_zone_change')
+ @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
def test__zone_delete(self, apply_zone_change_mock):
zone_name = 'testzone'
cmd_list = ['zonedelete', '"testzone"']
self._zone_delete(zone_name)
apply_zone_change_mock.assert_called_once_with(cmd_list)
- @patch.object(BrcdFCZoneClientCLI, 'apply_zone_change')
+ @mock.patch.object(client_cli.BrcdFCZoneClientCLI, 'apply_zone_change')
def test__cfg_trans_abort(self, apply_zone_change_mock):
cmd_list = [ZoneConstant.CFG_ZONE_TRANS_ABORT]
with mock.patch.object(self, '_is_trans_abortable') \
is_trans_abortable_mock.assert_called_once_with()
apply_zone_change_mock.assert_called_once_with(cmd_list)
- @patch.object(BrcdFCZoneClientCLI, '_run_ssh')
+ @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
def test__is_trans_abortable_true(self, run_ssh_mock):
cmd_list = [ZoneConstant.CFG_SHOW_TRANS]
run_ssh_mock.return_value = (Stream(ZoneConstant.TRANS_ABORTABLE),
self.assertTrue(data)
run_ssh_mock.assert_called_once_with(cmd_list, True, 1)
- @patch.object(BrcdFCZoneClientCLI, '_run_ssh')
+ @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
def test__is_trans_abortable_ssh_error(self, run_ssh_mock):
run_ssh_mock.return_value = (Stream(), Stream())
self.assertRaises(exception.BrocadeZoningCliException,
self._is_trans_abortable)
- @patch.object(BrcdFCZoneClientCLI, '_run_ssh')
+ @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
def test__is_trans_abortable_false(self, run_ssh_mock):
cmd_list = [ZoneConstant.CFG_SHOW_TRANS]
cfgtransshow = 'There is no outstanding zoning transaction'
self.assertFalse(data)
run_ssh_mock.assert_called_once_with(cmd_list, True, 1)
- @patch.object(BrcdFCZoneClientCLI, '_run_ssh')
+ @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
def test_apply_zone_change(self, run_ssh_mock):
cmd_list = [ZoneConstant.CFG_SAVE]
run_ssh_mock.return_value = (None, None)
self.apply_zone_change(cmd_list)
run_ssh_mock.assert_called_once_with(cmd_list, True, 1)
- @patch.object(BrcdFCZoneClientCLI, '_run_ssh')
+ @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_run_ssh')
def test__get_switch_info(self, run_ssh_mock):
cmd_list = [ZoneConstant.NS_SHOW]
nsshow_list = [nsshow]
self.assertRaises(exception.InvalidParameterValue,
self._parse_ns_output, invalid_switch_data)
- @patch.object(BrcdFCZoneClientCLI, '_execute_shell_cmd')
+ @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd')
def test_is_supported_firmware(self, exec_shell_cmd_mock):
exec_shell_cmd_mock.return_value = (supported_firmware, None)
self.assertTrue(self.is_supported_firmware())
- @patch.object(BrcdFCZoneClientCLI, '_execute_shell_cmd')
+ @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd')
def test_is_supported_firmware_invalid(self, exec_shell_cmd_mock):
exec_shell_cmd_mock.return_value = (unsupported_firmware, None)
self.assertFalse(self.is_supported_firmware())
- @patch.object(BrcdFCZoneClientCLI, '_execute_shell_cmd')
+ @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd')
def test_is_supported_firmware_no_ssh_response(self, exec_shell_cmd_mock):
exec_shell_cmd_mock.return_value = (None, Stream())
self.assertFalse(self.is_supported_firmware())
- @patch.object(BrcdFCZoneClientCLI, '_execute_shell_cmd')
+ @mock.patch.object(client_cli.BrcdFCZoneClientCLI, '_execute_shell_cmd')
def test_is_supported_firmware_ssh_error(self, exec_shell_cmd_mock):
exec_shell_cmd_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(exception.BrocadeZoningCliException,
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
-from cinder.zonemanager.drivers.brocade.brcd_fc_zone_driver \
- import BrcdFCZoneDriver
+from cinder.zonemanager.drivers.brocade import brcd_fc_zone_driver as driver
LOG = logging.getLogger(__name__)
fabric_map = {}
return fabric_map
- @mock.patch.object(BrcdFCZoneDriver, '_get_active_zone_set')
+ @mock.patch.object(driver.BrcdFCZoneDriver, '_get_active_zone_set')
def test_add_connection(self, get_active_zs_mock):
"""Normal flow for i-t mode."""
GlobalVars._is_normal_test = True
self.driver.add_connection('BRCD_FAB_1', _initiator_target_map)
self.assertTrue(_zone_name in GlobalVars._zone_state)
- @mock.patch.object(BrcdFCZoneDriver, '_get_active_zone_set')
+ @mock.patch.object(driver.BrcdFCZoneDriver, '_get_active_zone_set')
def test_delete_connection(self, get_active_zs_mock):
GlobalVars._is_normal_test = True
get_active_zs_mock.return_value = _active_cfg_before_delete
'BRCD_FAB_1', _initiator_target_map)
self.assertFalse(_zone_name in GlobalVars._zone_state)
- @mock.patch.object(BrcdFCZoneDriver, '_get_active_zone_set')
+ @mock.patch.object(driver.BrcdFCZoneDriver, '_get_active_zone_set')
def test_add_connection_for_initiator_mode(self, get_active_zs_mock):
"""Normal flow for i mode."""
GlobalVars._is_normal_test = True
self.driver.add_connection('BRCD_FAB_1', _initiator_target_map)
self.assertTrue(_zone_name in GlobalVars._zone_state)
- @mock.patch.object(BrcdFCZoneDriver, '_get_active_zone_set')
+ @mock.patch.object(driver.BrcdFCZoneDriver, '_get_active_zone_set')
def test_delete_connection_for_initiator_mode(self, get_active_zs_mock):
GlobalVars._is_normal_test = True
get_active_zs_mock.return_value = _active_cfg_before_delete
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
-from cinder.zonemanager.fc_san_lookup_service import FCSanLookupService
+from cinder.zonemanager import fc_san_lookup_service as san_service
LOG = logging.getLogger(__name__)
_fabric_wwn = '100000051e55a100'
-class TestFCSanLookupService(FCSanLookupService, test.TestCase):
+class TestFCSanLookupService(san_service.FCSanLookupService, test.TestCase):
def setUp(self):
super(TestFCSanLookupService, self).setUp()
import cinder.zonemanager.drivers.cisco.cisco_fc_san_lookup_service \
as cisco_lookup
import cinder.zonemanager.drivers.cisco.fc_zone_constants as ZoneConstant
-from cinder.zonemanager.utils import get_formatted_wwn
+from cinder.zonemanager import utils as zm_utils
nsshow = '20:1a:00:05:1e:e8:e3:29'
switch_data = ['VSAN 304\n',
wwn_list = ['10008c7cff523b01']
return_wwn_list = []
expected_wwn_list = ['10:00:8c:7c:ff:52:3b:01']
- return_wwn_list.append(get_formatted_wwn(wwn_list[0]))
+ return_wwn_list.append(zm_utils.get_formatted_wwn(wwn_list[0]))
self.assertEqual(return_wwn_list, expected_wwn_list)
@mock.patch.object(cisco_lookup.CiscoFCSanLookupService,
"""Unit tests for Cisco fc zone client cli."""
-from mock import patch
+import mock
from oslo_concurrency import processutils
from cinder import exception
from cinder import test
-from cinder.zonemanager.drivers.cisco.cisco_fc_zone_client_cli \
- import CiscoFCZoneClientCLI
+from cinder.zonemanager.drivers.cisco \
+ import cisco_fc_zone_client_cli as cli
import cinder.zonemanager.drivers.cisco.fc_zone_constants as ZoneConstant
nsshow = '20:1a:00:05:1e:e8:e3:29'
zone_names_to_delete = 'openstack50060b0000c26604201900051ee8e329'
-class TestCiscoFCZoneClientCLI(CiscoFCZoneClientCLI, test.TestCase):
+class TestCiscoFCZoneClientCLI(cli.CiscoFCZoneClientCLI, test.TestCase):
def setUp(self):
super(TestCiscoFCZoneClientCLI, self).setUp()
def __init__(self, *args, **kwargs):
test.TestCase.__init__(self, *args, **kwargs)
- @patch.object(CiscoFCZoneClientCLI, '_get_switch_info')
+ @mock.patch.object(cli.CiscoFCZoneClientCLI, '_get_switch_info')
def test_get_active_zone_set(self, get_switch_info_mock):
cmd_list = [ZoneConstant.GET_ACTIVE_ZONE_CFG, self.fabric_vsan,
' | no-more']
get_switch_info_mock.assert_called_once_with(cmd_list)
self.assertDictMatch(active_zoneset_returned, active_zoneset)
- @patch.object(CiscoFCZoneClientCLI, '_run_ssh')
+ @mock.patch.object(cli.CiscoFCZoneClientCLI, '_run_ssh')
def test_get_active_zone_set_ssh_error(self, run_ssh_mock):
run_ssh_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(exception.CiscoZoningCliException,
self.get_active_zone_set)
- @patch.object(CiscoFCZoneClientCLI, '_get_switch_info')
+ @mock.patch.object(cli.CiscoFCZoneClientCLI, '_get_switch_info')
def test_get_zoning_status_basic(self, get_zoning_status_mock):
cmd_list = [ZoneConstant.GET_ZONE_STATUS, self.fabric_vsan]
get_zoning_status_mock.return_value = zoning_status_data_basic
get_zoning_status_mock.assert_called_once_with(cmd_list)
self.assertDictMatch(zoning_status_returned, zoning_status_basic)
- @patch.object(CiscoFCZoneClientCLI, '_get_switch_info')
+ @mock.patch.object(cli.CiscoFCZoneClientCLI, '_get_switch_info')
def test_get_zoning_status_enhanced_nosess(self, get_zoning_status_mock):
cmd_list = [ZoneConstant.GET_ZONE_STATUS, self.fabric_vsan]
get_zoning_status_mock.return_value =\
self.assertDictMatch(zoning_status_returned,
zoning_status_enhanced_nosess)
- @patch.object(CiscoFCZoneClientCLI, '_get_switch_info')
+ @mock.patch.object(cli.CiscoFCZoneClientCLI, '_get_switch_info')
def test_get_zoning_status_enhanced_sess(self, get_zoning_status_mock):
cmd_list = [ZoneConstant.GET_ZONE_STATUS, self.fabric_vsan]
get_zoning_status_mock.return_value = zoning_status_data_enhanced_sess
self.assertDictMatch(zoning_status_returned,
zoning_status_enhanced_sess)
- @patch.object(CiscoFCZoneClientCLI, '_get_switch_info')
+ @mock.patch.object(cli.CiscoFCZoneClientCLI, '_get_switch_info')
def test_get_nameserver_info(self, get_switch_info_mock):
ns_info_list = []
ns_info_list_expected = ['20:1a:00:05:1e:e8:e3:29']
ns_info_list = self.get_nameserver_info()
self.assertEqual(ns_info_list, ns_info_list_expected)
- @patch.object(CiscoFCZoneClientCLI, '_run_ssh')
+ @mock.patch.object(cli.CiscoFCZoneClientCLI, '_run_ssh')
def test_get_nameserver_info_ssh_error(self, run_ssh_mock):
run_ssh_mock.side_effect = processutils.ProcessExecutionError
self.assertRaises(exception.CiscoZoningCliException,
self.get_nameserver_info)
- @patch.object(CiscoFCZoneClientCLI, '_run_ssh')
+ @mock.patch.object(cli.CiscoFCZoneClientCLI, '_run_ssh')
def test__cfg_save(self, run_ssh_mock):
cmd_list = ['copy', 'running-config', 'startup-config']
self._cfg_save()
run_ssh_mock.assert_called_once_with(cmd_list, True, 1)
- @patch.object(CiscoFCZoneClientCLI, '_run_ssh')
+ @mock.patch.object(cli.CiscoFCZoneClientCLI, '_run_ssh')
def test__get_switch_info(self, run_ssh_mock):
cmd_list = [ZoneConstant.FCNS_SHOW, self.fabric_vsan]
nsshow_list = [nsshow]
from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
-from cinder.zonemanager.fc_san_lookup_service import FCSanLookupService
+from cinder.zonemanager import fc_san_lookup_service as san_service
_target_ns_map = {'100000051e55a100': ['20240002ac000a50']}
_initiator_ns_map = {'100000051e55a100': ['10008c7cff523b01']}
_fabric_wwn = '100000051e55a100'
-class TestFCSanLookupService(FCSanLookupService, test.TestCase):
+class TestFCSanLookupService(san_service.FCSanLookupService, test.TestCase):
def setUp(self):
super(TestFCSanLookupService, self).setUp()
"""Unit tests for FC Zone Manager."""
import mock
-from mock import Mock
from cinder import exception
from cinder import test
from cinder.volume import configuration as conf
-from cinder.zonemanager.drivers.fc_zone_driver import FCZoneDriver
+from cinder.zonemanager.drivers import fc_zone_driver
from cinder.zonemanager import fc_zone_manager
fabric_name = 'BRCD_FAB_3'
config.fc_fabric_names = fabric_name
def fake_build_driver(self):
- self.driver = Mock(FCZoneDriver)
+ self.driver = mock.Mock(fc_zone_driver.FCZoneDriver)
self.stubs.Set(fc_zone_manager.ZoneManager, '_build_driver',
fake_build_driver)
self.zm = fc_zone_manager.ZoneManager(configuration=config)
self.configuration = conf.Configuration(None)
self.configuration.fc_fabric_names = fabric_name
- self.driver = Mock(FCZoneDriver)
+ self.driver = mock.Mock(fc_zone_driver.FCZoneDriver)
def __init__(self, *args, **kwargs):
super(TestFCZoneManager, self).__init__(*args, **kwargs)
from cinder.i18n import _, _LE, _LI
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
-from cinder.volume.drivers.cloudbyte.options import (
- cloudbyte_create_volume_opts
-)
-from cinder.volume.drivers.cloudbyte.options import cloudbyte_add_qosgroup_opts
-from cinder.volume.drivers.cloudbyte.options import cloudbyte_connection_opts
+from cinder.volume.drivers.cloudbyte import options
from cinder.volume.drivers.san import san
LOG = logging.getLogger(__name__)
def __init__(self, *args, **kwargs):
super(CloudByteISCSIDriver, self).__init__(*args, **kwargs)
- self.configuration.append_config_values(cloudbyte_add_qosgroup_opts)
- self.configuration.append_config_values(cloudbyte_create_volume_opts)
- self.configuration.append_config_values(cloudbyte_connection_opts)
+ self.configuration.append_config_values(
+ options.cloudbyte_add_qosgroup_opts)
+ self.configuration.append_config_values(
+ options.cloudbyte_create_volume_opts)
+ self.configuration.append_config_values(
+ options.cloudbyte_connection_opts)
self.get_volume_stats()
def _get_url(self, cmd, params, apikey):
from cinder.openstack.common import log as logging
from cinder.volume import driver
from cinder.volume.drivers.emc import emc_vnx_cli
-from cinder.zonemanager.utils import AddFCZone
-from cinder.zonemanager.utils import RemoveFCZone
+from cinder.zonemanager import utils as zm_utils
LOG = logging.getLogger(__name__)
"""Make sure volume is exported."""
pass
- @AddFCZone
+ @zm_utils.AddFCZone
def initialize_connection(self, volume, connector):
"""Initializes the connection and returns connection info.
% {'conn_info': conn_info})
return conn_info
- @RemoveFCZone
+ @zm_utils.RemoveFCZone
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
conn_info = self.cli.terminate_connection(volume, connector)
# Handle case where we are running in a monkey patched environment
if patcher.is_monkey_patched('socket'):
- from eventlet.green.OpenSSL.SSL import GreenConnection as Connection
+ from eventlet.green.OpenSSL import SSL
else:
raise ImportError
a delegator must be used.
"""
def __init__(self, *args, **kwargs):
- self.connection = Connection(*args, **kwargs)
+ self.connection = SSL.Connection(*args, **kwargs)
def __getattr__(self, name):
return getattr(self.connection, name)
import datetime
import random
import re
-from xml.dom.minidom import parseString
+from xml.dom import minidom
import six
myFile = open(fileName, 'r')
data = myFile.read()
myFile.close()
- dom = parseString(data)
+ dom = minidom.parseString(data)
portGroupElements = dom.getElementsByTagName('PortGroup')
if portGroupElements is not None and len(portGroupElements) > 0:
myFile = open(fileName, 'r')
data = myFile.read()
myFile.close()
- dom = parseString(data)
+ dom = minidom.parseString(data)
tag = dom.getElementsByTagName(stringToParse)
if tag is not None and len(tag) > 0:
strXml = tag[0].toxml()
from taskflow.types import failure
from cinder import exception
-from cinder.exception import EMCVnxCLICmdError
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
from cinder import utils
-from cinder.volume.configuration import Configuration
+from cinder.volume import configuration as config
from cinder.volume.drivers.san import san
from cinder.volume import manager
from cinder.volume import utils as vol_utils
'-tieringPolicy', 'noMovement']}
def _raise_cli_error(self, cmd=None, rc=None, out='', **kwargs):
- raise EMCVnxCLICmdError(cmd=cmd,
- rc=rc,
- out=out.split('\n'),
- **kwargs)
+ raise exception.EMCVnxCLICmdError(cmd=cmd,
+ rc=rc,
+ out=out.split('\n'),
+ **kwargs)
def create_lun_with_advance_feature(self, pool, name, size,
provisioning, tiering,
if provisioning == 'compressed':
self.enable_or_disable_compression_on_lun(
name, 'on')
- except EMCVnxCLICmdError as ex:
+ except exception.EMCVnxCLICmdError as ex:
with excutils.save_and_reraise_exception():
self.delete_lun(name)
LOG.error(_LE("Error on enable compression on lun %s."),
if consistencygroup_id:
self.add_lun_to_consistency_group(
consistencygroup_id, data['lun_id'])
- except EMCVnxCLICmdError as ex:
+ except exception.EMCVnxCLICmdError as ex:
with excutils.save_and_reraise_exception():
self.delete_lun(name)
LOG.error(_LE("Error on adding lun to consistency"
return (data[self.LUN_STATE.key] == 'Ready' and
data[self.LUN_STATUS.key] == 'OK(0x0)' and
data[self.LUN_OPERATION.key] == 'None')
- except EMCVnxCLICmdError as ex:
+ except exception.EMCVnxCLICmdError as ex:
orig_out = "\n".join(ex.kwargs["out"])
if orig_out.find(
self.CLI_RESP_PATTERN_LUN_NOT_EXIST) >= 0:
dst_name=None):
try:
self.migrate_lun(src_id, dst_id)
- except EMCVnxCLICmdError as ex:
+ except exception.EMCVnxCLICmdError as ex:
migration_succeed = False
orig_out = "\n".join(ex.kwargs["out"])
if self._is_sp_unavailable_error(orig_out):
self.configuration.check_max_pool_luns_threshold)
# if zoning_mode is fabric, use lookup service to build itor_tgt_map
self.zonemanager_lookup_service = None
- zm_conf = Configuration(manager.volume_manager_opts)
+ zm_conf = config.Configuration(manager.volume_manager_opts)
if (zm_conf.safe_get('zoning_mode') == 'fabric' or
self.configuration.safe_get('zoning_mode') == 'fabric'):
- from cinder.zonemanager.fc_san_lookup_service \
- import FCSanLookupService
+ from cinder.zonemanager import fc_san_lookup_service as fc_service
self.zonemanager_lookup_service = \
- FCSanLookupService(configuration=configuration)
+ fc_service.FCSanLookupService(configuration=configuration)
self.max_retries = 5
if self.destroy_empty_sg:
LOG.warning(_LW("destroy_empty_storage_group: True. "
"""Deletes an EMC volume."""
try:
self._client.delete_lun(volume['name'])
- except EMCVnxCLICmdError as ex:
+ except exception.EMCVnxCLICmdError as ex:
orig_out = "\n".join(ex.kwargs["out"])
if (self.force_delete_lun_in_sg and
(self._client.CLI_RESP_PATTERN_LUN_IN_SG_1 in orig_out or
def assure_host_in_storage_group(self, hostname, storage_group):
try:
self._client.connect_host_to_storage_group(hostname, storage_group)
- except EMCVnxCLICmdError as ex:
+ except exception.EMCVnxCLICmdError as ex:
if ex.kwargs["rc"] == 83:
# SG was not created or was destroyed by another concurrent
# operation before connected.
try:
sgdata = self._client.get_storage_group(hostname,
poll=False)
- except EMCVnxCLICmdError as ex:
+ except exception.EMCVnxCLICmdError as ex:
if ex.kwargs["rc"] != 83:
raise ex
# Storage Group has not existed yet
self.hlu_cache[hostname] = {}
self.hlu_cache[hostname][lun_id] = hlu
return hlu, sgdata
- except EMCVnxCLICmdError as ex:
+ except exception.EMCVnxCLICmdError as ex:
LOG.debug("Add HLU to storagegroup failed, retry %s",
tried)
elif tried == 1:
try:
lun_map = self.get_lun_map(hostname)
self.hlu_cache[hostname] = lun_map
- except EMCVnxCLICmdError as ex:
+ except exception.EMCVnxCLICmdError as ex:
if ex.kwargs["rc"] == 83:
LOG.warning(_LW("Storage Group %s is not found. "
"terminate_connection() is "
from cinder.openstack.common import log as logging
from cinder import ssh_utils
from cinder import utils
-from cinder.volume.drivers.san import SanISCSIDriver
+from cinder.volume.drivers import san
LOG = logging.getLogger(__name__)
return __inner
-class DellEQLSanISCSIDriver(SanISCSIDriver):
+class DellEQLSanISCSIDriver(san.SanISCSIDriver):
"""Implements commands for Dell EqualLogic SAN ISCSI management.
To enable the driver add the following line to the cinder configuration:
import base64
import hashlib
import time
-from xml.dom.minidom import parseString
+from xml.dom import minidom
from oslo_config import cfg
from oslo_utils import units
file = open(filename, 'r')
data = file.read()
file.close()
- dom = parseString(data)
+ dom = minidom.parseString(data)
storageTypes = dom.getElementsByTagName('StorageType')
if storageTypes is not None and len(storageTypes) > 0:
storageType = storageTypes[0].toxml()
file = open(filename, 'r')
data = file.read()
file.close()
- dom = parseString(data)
+ dom = minidom.parseString(data)
snappools = dom.getElementsByTagName('SnapPool')
if snappools is not None and len(snappools) > 0:
snappool = snappools[0].toxml()
file = open(filename, 'r')
data = file.read()
file.close()
- dom = parseString(data)
+ dom = minidom.parseString(data)
timeouts = dom.getElementsByTagName('Timeout')
if timeouts is not None and len(timeouts) > 0:
timeout = timeouts[0].toxml().replace('<Timeout>', '')
file = open(filename, 'r')
data = file.read()
file.close()
- dom = parseString(data)
+ dom = minidom.parseString(data)
ecomUsers = dom.getElementsByTagName('EcomUserName')
if ecomUsers is not None and len(ecomUsers) > 0:
ecomUser = ecomUsers[0].toxml().replace('<EcomUserName>', '')
file = open(filename, 'r')
data = file.read()
file.close()
- dom = parseString(data)
+ dom = minidom.parseString(data)
ecomIps = dom.getElementsByTagName('EcomServerIp')
if ecomIps is not None and len(ecomIps) > 0:
ecomIp = ecomIps[0].toxml().replace('<EcomServerIp>', '')
from cinder.i18n import _, _LW
from cinder.openstack.common import log as logging
from cinder.openstack.common import loopingcall
-from cinder.volume.drivers.san.san import SanISCSIDriver
+from cinder.volume.drivers.san import san
from cinder.volume import qos_specs
from cinder.volume import volume_types
return
-class FIOioControlDriver(SanISCSIDriver):
+class FIOioControlDriver(san.SanISCSIDriver):
"""Fusion-io ioControl iSCSI volume driver."""
VERSION = '1.0.0'
from cinder.openstack.common import log as logging
from cinder import utils
from cinder.volume import driver
-from cinder.volume.drivers.hds.hus_backend import HusBackend
+from cinder.volume.drivers.hds import hus_backend
HDS_VERSION = '1.0.2'
def factory_bend():
"""Factory over-ride in self-tests."""
- return HusBackend()
+ return hus_backend.HusBackend()
def _loc_info(loc):
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
from cinder.volume import driver
-from cinder.volume.drivers.hds.hnas_backend import HnasBackend
+from cinder.volume.drivers.hds import hnas_backend
from cinder.volume import utils
def factory_bend(type):
- return HnasBackend()
+ return hnas_backend.HnasBackend()
def _loc_info(loc):
from cinder.i18n import _, _LE, _LI
from cinder.image import image_utils
from cinder.openstack.common import log as logging
-from cinder.volume.drivers.hds.hnas_backend import HnasBackend
+from cinder.volume.drivers.hds import hnas_backend
from cinder.volume.drivers import nfs
def factory_bend():
"""Factory over-ride in self-tests."""
- return HnasBackend()
+ return hnas_backend.HnasBackend()
class HDSNFSDriver(nfs.NfsDriver):
"""
-from contextlib import nested
import re
import threading
pool_id = self.configuration.hitachi_pool_id
lock = basic_lib.get_process_lock(self.storage_lock_file)
- with nested(self.storage_obj_lock, lock):
+ with self.storage_obj_lock, lock:
ldev = self.create_ldev(size, ldev_range, pool_id, is_vvol)
return ldev
"""
-from contextlib import nested
import os
import threading
msg = basic_lib.output_err(619, volume_id=volume['id'])
raise exception.HBSDError(message=msg)
self.common.add_volinfo(ldev, volume['id'])
- with nested(self.common.volume_info[ldev]['lock'],
- self.common.volume_info[ldev]['in_use']):
+ with self.common.volume_info[ldev]['lock'],\
+ self.common.volume_info[ldev]['in_use']:
hostgroups = self._initialize_connection(ldev, connector)
properties = self._get_properties(volume, hostgroups)
LOG.debug('Initialize volume_info: %s'
raise exception.HBSDError(message=msg)
self.common.add_volinfo(ldev, volume['id'])
- with nested(self.common.volume_info[ldev]['lock'],
- self.common.volume_info[ldev]['in_use']):
+ with self.common.volume_info[ldev]['lock'],\
+ self.common.volume_info[ldev]['in_use']:
self._terminate_connection(ldev, connector, hostgroups)
properties = self._get_properties(volume, hostgroups,
terminate=True)
# License for the specific language governing permissions and limitations
# under the License.
-from contextlib import nested
-from functools import wraps
+import functools
import os
import re
import shlex
def horcm_synchronized(function):
- @wraps(function)
+ @functools.wraps(function)
def wrapper(*args, **kargs):
if len(args) == 1:
inst = args[0].conf.hitachi_horcm_numbers[0]
raidcom_obj_lock = args[0].raidcom_pair_lock
raidcom_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, inst)
lock = basic_lib.get_process_lock(raidcom_lock_file)
- with nested(raidcom_obj_lock, lock):
+ with raidcom_obj_lock, lock:
return function(*args, **kargs)
return wrapper
def storage_synchronized(function):
- @wraps(function)
+ @functools.wraps(function)
def wrapper(*args, **kargs):
serial = args[0].conf.hitachi_serial_number
resource_lock = args[0].resource_lock
resource_lock_file = '%s%s' % (RESOURCE_LOCK_FILE, serial)
lock = basic_lib.get_process_lock(resource_lock_file)
- with nested(resource_lock, lock):
+ with resource_lock, lock:
return function(*args, **kargs)
return wrapper
raidcom_lock_file = '%s%d' % (RAIDCOM_LOCK_FILE, inst)
lock = basic_lib.get_process_lock(raidcom_lock_file)
- with nested(raidcom_obj_lock, lock):
+ with raidcom_obj_lock, lock:
ret, stdout, stderr = self.exec_command(cmd, args=args,
printflag=printflag)
elif ret in HORCM_ERROR:
_ret = 0
- with nested(raidcom_obj_lock, lock):
+ with raidcom_obj_lock, lock:
if self.check_horcm(inst) != HORCM_RUNNING:
_ret, _stdout, _stderr = self.start_horcm(inst)
if _ret and _ret != HORCM_RUNNING:
"""
-from contextlib import nested
import os
import threading
msg = basic_lib.output_err(619, volume_id=volume['id'])
raise exception.HBSDError(message=msg)
self.common.add_volinfo(ldev, volume['id'])
- with nested(self.common.volume_info[ldev]['lock'],
- self.common.volume_info[ldev]['in_use']):
+ with self.common.volume_info[ldev]['lock'],\
+ self.common.volume_info[ldev]['in_use']:
hostgroups = self._initialize_connection(ldev, connector)
protocol = 'iscsi'
properties = self._get_properties(volume, hostgroups)
raise exception.HBSDError(message=msg)
self.common.add_volinfo(ldev, volume['id'])
- with nested(self.common.volume_info[ldev]['lock'],
- self.common.volume_info[ldev]['in_use']):
+ with self.common.volume_info[ldev]['lock'],\
+ self.common.volume_info[ldev]['in_use']:
self._terminate_connection(ldev, connector, hostgroups)
def create_export(self, context, volume):
# License for the specific language governing permissions and limitations
# under the License.
-from contextlib import nested
import re
import shlex
import threading
def _wait_for_exec_hsnm(self, args, printflag, noretry, timeout, start):
lock = basic_lib.get_process_lock(self.hsnm_lock_file)
- with nested(self.hsnm_lock, lock):
+ with self.hsnm_lock, lock:
ret, stdout, stderr = self.exec_command('env', args=args,
printflag=printflag)
import pexpect
lock = basic_lib.get_process_lock(self.hsnm_lock_file)
- with nested(self.hsnm_lock, lock):
+ with self.hsnm_lock, lock:
try:
child = pexpect.spawn(cmd)
child.expect('Secret: ', timeout=CHAP_TIMEOUT)
"""
from cinder.volume import driver
-from cinder.volume.drivers.huawei.rest_common import RestCommon
+from cinder.volume.drivers.huawei import rest_common
from cinder.zonemanager import utils as fczm_utils
def do_setup(self, context):
"""Instantiate common class and log in storage system."""
- self.common = RestCommon(configuration=self.configuration)
+ self.common = rest_common.RestCommon(configuration=self.configuration)
return self.common.login()
def check_for_setup_error(self):
def do_setup(self, context):
"""Instantiate common class and log in storage system."""
- self.common = RestCommon(configuration=self.configuration)
+ self.common = rest_common.RestCommon(configuration=self.configuration)
return self.common.login()
def check_for_setup_error(self):
from cinder.openstack.common import log as logging
from cinder import utils
from cinder.volume.drivers import nfs
-from cinder.volume.drivers.remotefs import nas_opts
+from cinder.volume.drivers import remotefs
from cinder.volume.drivers.san import san
VERSION = '1.1.0'
def __init__(self, execute=utils.execute, *args, **kwargs):
self._context = None
super(IBMNAS_NFSDriver, self).__init__(*args, **kwargs)
- self.configuration.append_config_values(nas_opts)
+ self.configuration.append_config_values(remotefs.nas_opts)
self.configuration.append_config_values(platform_opts)
self.configuration.san_ip = self.configuration.nas_ip
self.configuration.san_login = self.configuration.nas_login
from cinder.i18n import _, _LI
from cinder.openstack.common import log as logging
from cinder.volume import driver
-from cinder.volume.drivers.netapp.options import netapp_proxy_opts
+from cinder.volume.drivers.netapp import options
from cinder.volume.drivers.netapp import utils as na_utils
raise exception.InvalidInput(
reason=_('Required configuration not found'))
- config.append_config_values(netapp_proxy_opts)
+ config.append_config_values(options.netapp_proxy_opts)
na_utils.check_flags(NetAppDriver.REQUIRED_FLAGS, config)
app_version = na_utils.OpenStackInfo().info()
from cinder import exception
from cinder.i18n import _, _LW
from cinder.openstack.common import log as logging
-from cinder.volume.configuration import Configuration
+from cinder.volume import configuration
from cinder.volume.drivers.netapp.dataontap import block_base
from cinder.volume.drivers.netapp.dataontap.client import client_7mode
from cinder.volume.drivers.netapp import options as na_opts
def _do_partner_setup(self):
partner_backend = self.configuration.netapp_partner_backend_name
if partner_backend:
- config = Configuration(na_opts.netapp_7mode_opts, partner_backend)
+ config = configuration.Configuration(na_opts.netapp_7mode_opts,
+ partner_backend)
config.append_config_values(na_opts.netapp_connection_opts)
config.append_config_values(na_opts.netapp_basicauth_opts)
config.append_config_values(na_opts.netapp_transport_opts)
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
-from cinder.volume.drivers.netapp.dataontap.client.api import NaApiError
+from cinder.volume.drivers.netapp.dataontap.client import api as na_api
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils
initiator_type, os)
try:
return self.zapi_client.map_lun(path, igroup_name, lun_id=lun_id)
- except NaApiError:
+ except na_api.NaApiError:
exc_info = sys.exc_info()
(_igroup, lun_id) = self._find_mapped_lun_igroup(path,
initiator_list)
from cinder.i18n import _LE, _LW, _LI
from cinder.openstack.common import log as logging
from cinder.volume.drivers.netapp.dataontap.client import api as netapp_api
-from cinder.volume.drivers.netapp.dataontap.client.api import NaApiError
-from cinder.volume.drivers.netapp.dataontap.client.api import NaElement
-from cinder.volume.drivers.netapp.dataontap.client.api import NaServer
LOG = logging.getLogger(__name__)
class Client(object):
def __init__(self, **kwargs):
- self.connection = NaServer(host=kwargs['hostname'],
- transport_type=kwargs['transport_type'],
- port=kwargs['port'],
- username=kwargs['username'],
- password=kwargs['password'])
+ self.connection = netapp_api.NaServer(
+ host=kwargs['hostname'],
+ transport_type=kwargs['transport_type'],
+ port=kwargs['port'],
+ username=kwargs['username'],
+ password=kwargs['password'])
def get_ontapi_version(self, cached=True):
"""Gets the supported ontapi version."""
def check_is_naelement(self, elem):
"""Checks if object is instance of NaElement."""
- if not isinstance(elem, NaElement):
+ if not isinstance(elem, netapp_api.NaElement):
raise ValueError('Expects NaElement')
def create_lun(self, volume_name, lun_name, size, metadata,
"""
def _create_ems(netapp_backend, app_version, server_type):
"""Create ems API request."""
- ems_log = NaElement('ems-autosupport-log')
+ ems_log = netapp_api.NaElement('ems-autosupport-log')
host = socket.getfqdn() or 'Cinder_node'
if server_type == "cluster":
dest = "cluster node"
def _create_vs_get():
"""Create vs_get API request."""
- vs_get = NaElement('vserver-get-iter')
+ vs_get = netapp_api.NaElement('vserver-get-iter')
vs_get.add_new_child('max-records', '1')
- query = NaElement('query')
+ query = netapp_api.NaElement('query')
query.add_node_with_children('vserver-info',
**{'vserver-type': 'node'})
vs_get.add_child_elem(query)
- desired = NaElement('desired-attributes')
+ desired = netapp_api.NaElement('desired-attributes')
desired.add_node_with_children(
'vserver-info', **{'vserver-name': '', 'vserver-type': ''})
vs_get.add_child_elem(desired)
if api_version:
major, minor = api_version
else:
- raise NaApiError(code='Not found',
- message='No API version found')
+ raise netapp_api.NaApiError(
+ code='Not found',
+ message='No API version found')
if major == 1 and minor > 15:
node = getattr(requester, 'vserver', None)
else:
node = _get_cluster_node(na_server)
if node is None:
- raise NaApiError(code='Not found',
- message='No vserver found')
+ raise netapp_api.NaApiError(
+ code='Not found',
+ message='No vserver found')
na_server.set_vserver(node)
else:
na_server.set_vfiler(None)
na_server.invoke_successfully(ems, True)
LOG.debug("ems executed successfully.")
- except NaApiError as e:
+ except netapp_api.NaApiError as e:
LOG.warning(_LW("Failed to invoke ems. Message : %s") % e)
finally:
requester.last_ems = timeutils.utcnow()
from cinder.openstack.common import log as logging
from cinder.volume import driver
-from cinder.volume.drivers.netapp.dataontap.block_7mode import \
- NetAppBlockStorage7modeLibrary as lib_7mode
+from cinder.volume.drivers.netapp.dataontap import block_7mode
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
def __init__(self, *args, **kwargs):
super(NetApp7modeFibreChannelDriver, self).__init__(*args, **kwargs)
- self.library = lib_7mode(self.DRIVER_NAME, 'FC', **kwargs)
+ self.library = block_7mode.NetAppBlockStorage7modeLibrary(
+ self.DRIVER_NAME, 'FC', **kwargs)
def do_setup(self, context):
self.library.do_setup(context)
from cinder.openstack.common import log as logging
from cinder.volume import driver
-from cinder.volume.drivers.netapp.dataontap.block_cmode import \
- NetAppBlockStorageCmodeLibrary as lib_cmode
+from cinder.volume.drivers.netapp.dataontap import block_cmode
from cinder.zonemanager import utils as fczm_utils
LOG = logging.getLogger(__name__)
def __init__(self, *args, **kwargs):
super(NetAppCmodeFibreChannelDriver, self).__init__(*args, **kwargs)
- self.library = lib_cmode(self.DRIVER_NAME, 'FC', **kwargs)
+ self.library = block_cmode.NetAppBlockStorageCmodeLibrary(
+ self.DRIVER_NAME, 'FC', **kwargs)
def do_setup(self, context):
self.library.do_setup(context)
from cinder.openstack.common import log as logging
from cinder.volume import driver
-from cinder.volume.drivers.netapp.dataontap.block_7mode import \
- NetAppBlockStorage7modeLibrary as lib_7mode
+from cinder.volume.drivers.netapp.dataontap import block_7mode
LOG = logging.getLogger(__name__)
def __init__(self, *args, **kwargs):
super(NetApp7modeISCSIDriver, self).__init__(*args, **kwargs)
- self.library = lib_7mode(self.DRIVER_NAME, 'iSCSI', **kwargs)
+ self.library = block_7mode.NetAppBlockStorage7modeLibrary(
+ self.DRIVER_NAME, 'iSCSI', **kwargs)
def do_setup(self, context):
self.library.do_setup(context)
from cinder.openstack.common import log as logging
from cinder.volume import driver
-from cinder.volume.drivers.netapp.dataontap.block_cmode import \
- NetAppBlockStorageCmodeLibrary as lib_cmode
+from cinder.volume.drivers.netapp.dataontap import block_cmode
LOG = logging.getLogger(__name__)
def __init__(self, *args, **kwargs):
super(NetAppCmodeISCSIDriver, self).__init__(*args, **kwargs)
- self.library = lib_cmode(self.DRIVER_NAME, 'iSCSI', **kwargs)
+ self.library = block_cmode.NetAppBlockStorageCmodeLibrary(
+ self.DRIVER_NAME, 'iSCSI', **kwargs)
def do_setup(self, context):
self.library.do_setup(context)
import os
import re
-from threading import Timer
+import threading
import time
from oslo_concurrency import processutils
else:
# Set cleaning to True
self.cleaning = True
- t = Timer(0, self._clean_image_cache)
+ t = threading.Timer(0, self._clean_image_cache)
t.start()
def _clean_image_cache(self):
from cinder.volume.drivers.netapp.dataontap import ssc_cmode
from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
-from cinder.volume.drivers.netapp.utils import get_volume_extra_specs
from cinder.volume import utils as volume_utils
msg = _("Pool is not available in the volume host field.")
raise exception.InvalidHost(reason=msg)
- extra_specs = get_volume_extra_specs(volume)
+ extra_specs = na_utils.get_volume_extra_specs(volume)
qos_policy_group = extra_specs.pop('netapp:qos_policy_group', None) \
if extra_specs else None
qos_policy_group)
return {'provider_location': volume['provider_location']}
except Exception as ex:
- LOG.error(_LW("Exception creating vol %(name)s on "
+ LOG.error(_LW("Exception creating vol %(name)s on "
"share %(share)s. Details: %(ex)s")
% {'name': volume['name'],
'share': volume['provider_location'],
netapp_vol = self._get_vol_for_share(share)
LOG.debug("Found volume %(vol)s for share %(share)s."
% {'vol': netapp_vol, 'share': share})
- extra_specs = get_volume_extra_specs(volume)
+ extra_specs = na_utils.get_volume_extra_specs(volume)
vols = ssc_cmode.get_volumes_for_specs(self.ssc_vols, extra_specs)
return netapp_vol in vols
"""
import copy
-from threading import Timer
+import threading
from oslo_utils import timeutils
import six
if synchronous:
get_cluster_latest_ssc(backend, na_server, vserver)
else:
- t = Timer(0, get_cluster_latest_ssc,
- args=[backend, na_server, vserver])
+ t = threading.Timer(0, get_cluster_latest_ssc,
+ args=[backend, na_server, vserver])
t.start()
elif getattr(backend, 'refresh_stale_running', None):
LOG.warning(_LW('refresh stale ssc job in progress. Returning... '))
if synchronous:
refresh_cluster_stale_ssc(backend, na_server, vserver)
else:
- t = Timer(0, refresh_cluster_stale_ssc,
- args=[backend, na_server, vserver])
+ t = threading.Timer(0, refresh_cluster_stale_ssc,
+ args=[backend, na_server, vserver])
t.start()
from cinder.volume import driver
from cinder.volume.drivers.netapp.eseries import client
from cinder.volume.drivers.netapp.eseries import utils
-from cinder.volume.drivers.netapp.options import netapp_basicauth_opts
-from cinder.volume.drivers.netapp.options import netapp_connection_opts
-from cinder.volume.drivers.netapp.options import netapp_eseries_opts
-from cinder.volume.drivers.netapp.options import netapp_transport_opts
+from cinder.volume.drivers.netapp import options as na_opts
from cinder.volume.drivers.netapp import utils as na_utils
from cinder.volume import utils as volume_utils
CONF = cfg.CONF
-CONF.register_opts(netapp_basicauth_opts)
-CONF.register_opts(netapp_connection_opts)
-CONF.register_opts(netapp_eseries_opts)
-CONF.register_opts(netapp_transport_opts)
+CONF.register_opts(na_opts.netapp_basicauth_opts)
+CONF.register_opts(na_opts.netapp_connection_opts)
+CONF.register_opts(na_opts.netapp_eseries_opts)
+CONF.register_opts(na_opts.netapp_transport_opts)
class NetAppEseriesISCSIDriver(driver.ISCSIDriver):
def __init__(self, *args, **kwargs):
super(NetAppEseriesISCSIDriver, self).__init__(*args, **kwargs)
na_utils.validate_instantiation(**kwargs)
- self.configuration.append_config_values(netapp_basicauth_opts)
- self.configuration.append_config_values(netapp_connection_opts)
- self.configuration.append_config_values(netapp_transport_opts)
- self.configuration.append_config_values(netapp_eseries_opts)
+ self.configuration.append_config_values(na_opts.netapp_basicauth_opts)
+ self.configuration.append_config_values(
+ na_opts.netapp_connection_opts)
+ self.configuration.append_config_values(na_opts.netapp_transport_opts)
+ self.configuration.append_config_values(na_opts.netapp_eseries_opts)
self._backend_name = self.configuration.safe_get("volume_backend_name")\
or "NetApp_ESeries"
self._objects = {'disk_pool_refs': [], 'pools': [],
from cinder import exception
from cinder.i18n import _, _LE, _LI
from cinder.openstack.common import log as logging
-from cinder.volume.drivers.san.san import SanISCSIDriver
+from cinder.volume.drivers.san import san
DRIVER_VERSION = '1.0'
message = _("Unexpected response from Nimble API")
-class NimbleISCSIDriver(SanISCSIDriver):
+class NimbleISCSIDriver(san.SanISCSIDriver):
"""OpenStack driver to enable Nimble Controller.
from cinder import exception
from cinder.i18n import _, _LE
from cinder.openstack.common import log as logging
-from cinder.volume.drivers.san.san import SanISCSIDriver
+from cinder.volume.drivers.san import san
LOG = logging.getLogger(__name__)
-class HPLeftHandCLIQProxy(SanISCSIDriver):
+class HPLeftHandCLIQProxy(san.SanISCSIDriver):
"""Executes commands relating to HP/LeftHand SAN ISCSI volumes.
We use the CLIQ interface, over SSH.
from cinder import exception
from cinder.i18n import _LE, _LI
from cinder.openstack.common import log as logging
-from cinder.volume.driver import VolumeDriver
+from cinder.volume import driver
from cinder.volume.drivers.san.hp import hp_lefthand_cliq_proxy as cliq_proxy
from cinder.volume.drivers.san.hp import hp_lefthand_rest_proxy as rest_proxy
MIN_CLIENT_VERSION = '1.0.3'
-class HPLeftHandISCSIDriver(VolumeDriver):
+class HPLeftHandISCSIDriver(driver.VolumeDriver):
"""Executes commands relating to HP/LeftHand SAN ISCSI volumes.
Version history:
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.openstack.common import log as logging
-from cinder.volume.driver import ISCSIDriver
+from cinder.volume import driver
from cinder.volume import utils
from cinder.volume import volume_types
try:
import hplefthandclient
- from hplefthandclient import client as hp_lh_client
from hplefthandclient import exceptions as hpexceptions
except ImportError:
import cinder.tests.fake_hp_lefthand_client as hplefthandclient
}
-class HPLeftHandRESTProxy(ISCSIDriver):
+class HPLeftHandRESTProxy(driver.ISCSIDriver):
"""Executes REST commands relating to HP/LeftHand SAN ISCSI volumes.
Version history:
client.logout()
def _create_client(self):
- return hp_lh_client.HPLeftHandClient(
+ return hplefthandclient.client.HPLeftHandClient(
self.configuration.hplefthand_api_url)
def do_setup(self, context):
# License for the specific language governing permissions and limitations
# under the License.
#
-from hashlib import md5
+import hashlib
import urllib2
from lxml import etree
def login(self):
"""Authenticates the service on the device."""
- hash = md5("%s_%s" % (self._login, self._password))
+ hash = hashlib.md5("%s_%s" % (self._login, self._password))
digest = hash.hexdigest()
url = self._base_url + "/login/" + digest
from oslo_utils import timeutils
from oslo_utils import units
import requests
-from six import wraps
+import six
from cinder import context
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder.openstack.common import log as logging
-from cinder.volume.drivers.san.san import SanISCSIDriver
+from cinder.volume.drivers.san import san
from cinder.volume import qos_specs
from cinder.volume import volume_types
def retry(exc_tuple, tries=5, delay=1, backoff=2):
def retry_dec(f):
- @wraps(f)
+ @six.wraps(f)
def func_retry(*args, **kwargs):
_tries, _delay = tries, delay
while _tries > 1:
return retry_dec
-class SolidFireDriver(SanISCSIDriver):
+class SolidFireDriver(san.SanISCSIDriver):
"""OpenStack driver to enable SolidFire cluster.
Version history:
import os
if os.name == 'nt':
- from ctypes import windll
- from ctypes import wintypes
-
- kernel32 = windll.kernel32
- virtdisk = windll.virtdisk
+ kernel32 = ctypes.windll.kernel32
+ virtdisk = ctypes.windll.virtdisk
from cinder import exception
from cinder.i18n import _
if os.name == 'nt':
class Win32_GUID(ctypes.Structure):
- _fields_ = [("Data1", wintypes.DWORD),
- ("Data2", wintypes.WORD),
- ("Data3", wintypes.WORD),
- ("Data4", wintypes.BYTE * 8)]
+ _fields_ = [("Data1", ctypes.wintypes.DWORD),
+ ("Data2", ctypes.wintypes.WORD),
+ ("Data3", ctypes.wintypes.WORD),
+ ("Data4", ctypes.wintypes.BYTE * 8)]
class Win32_VIRTUAL_STORAGE_TYPE(ctypes.Structure):
_fields_ = [
- ('DeviceId', wintypes.ULONG),
+ ('DeviceId', ctypes.wintypes.ULONG),
('VendorId', Win32_GUID)
]
class Win32_RESIZE_VIRTUAL_DISK_PARAMETERS(ctypes.Structure):
_fields_ = [
- ('Version', wintypes.DWORD),
+ ('Version', ctypes.wintypes.DWORD),
('NewSize', ctypes.c_ulonglong)
]
class Win32_OPEN_VIRTUAL_DISK_PARAMETERS_V1(ctypes.Structure):
_fields_ = [
- ('Version', wintypes.DWORD),
+ ('Version', ctypes.wintypes.DWORD),
('RWDepth', ctypes.c_ulong),
]
class Win32_OPEN_VIRTUAL_DISK_PARAMETERS_V2(ctypes.Structure):
_fields_ = [
- ('Version', wintypes.DWORD),
- ('GetInfoOnly', wintypes.BOOL),
- ('ReadOnly', wintypes.BOOL),
+ ('Version', ctypes.wintypes.DWORD),
+ ('GetInfoOnly', ctypes.wintypes.BOOL),
+ ('ReadOnly', ctypes.wintypes.BOOL),
('ResiliencyGuid', Win32_GUID)
]
class Win32_MERGE_VIRTUAL_DISK_PARAMETERS(ctypes.Structure):
_fields_ = [
- ('Version', wintypes.DWORD),
+ ('Version', ctypes.wintypes.DWORD),
('MergeDepth', ctypes.c_ulong)
]
class Win32_CREATE_VIRTUAL_DISK_PARAMETERS(ctypes.Structure):
_fields_ = [
- ('Version', wintypes.DWORD),
+ ('Version', ctypes.wintypes.DWORD),
('UniqueId', Win32_GUID),
('MaximumSize', ctypes.c_ulonglong),
- ('BlockSizeInBytes', wintypes.ULONG),
- ('SectorSizeInBytes', wintypes.ULONG),
- ('PhysicalSectorSizeInBytes', wintypes.ULONG),
- ('ParentPath', wintypes.LPCWSTR),
- ('SourcePath', wintypes.LPCWSTR),
- ('OpenFlags', wintypes.DWORD),
+ ('BlockSizeInBytes', ctypes.wintypes.ULONG),
+ ('SectorSizeInBytes', ctypes.wintypes.ULONG),
+ ('PhysicalSectorSizeInBytes', ctypes.wintypes.ULONG),
+ ('ParentPath', ctypes.wintypes.LPCWSTR),
+ ('SourcePath', ctypes.wintypes.LPCWSTR),
+ ('OpenFlags', ctypes.wintypes.DWORD),
('ParentVirtualStorageType', Win32_VIRTUAL_STORAGE_TYPE),
('SourceVirtualStorageType', Win32_VIRTUAL_STORAGE_TYPE),
('ResiliencyGuid', Win32_GUID)
]
class Win32_SIZE(ctypes.Structure):
- _fields_ = [("VirtualSize", wintypes.ULARGE_INTEGER),
- ("PhysicalSize", wintypes.ULARGE_INTEGER),
- ("BlockSize", wintypes.ULONG),
- ("SectorSize", wintypes.ULONG)]
+ _fields_ = [("VirtualSize", ctypes.wintypes.ULARGE_INTEGER),
+ ("PhysicalSize", ctypes.wintypes.ULARGE_INTEGER),
+ ("BlockSize", ctypes.wintypes.ULONG),
+ ("SectorSize", ctypes.wintypes.ULONG)]
class Win32_PARENT_LOCATION(ctypes.Structure):
- _fields_ = [('ParentResolved', wintypes.BOOL),
- ('ParentLocationBuffer', wintypes.WCHAR * 512)]
+ _fields_ = [('ParentResolved', ctypes.wintypes.BOOL),
+ ('ParentLocationBuffer', ctypes.wintypes.WCHAR * 512)]
class Win32_PHYSICAL_DISK(ctypes.Structure):
- _fields_ = [("LogicalSectorSize", wintypes.ULONG),
- ("PhysicalSectorSize", wintypes.ULONG),
- ("IsRemote", wintypes.BOOL)]
+ _fields_ = [("LogicalSectorSize", ctypes.wintypes.ULONG),
+ ("PhysicalSectorSize", ctypes.wintypes.ULONG),
+ ("IsRemote", ctypes.wintypes.BOOL)]
class Win32_VHD_INFO(ctypes.Union):
_fields_ = [("Size", Win32_SIZE),
("Identifier", Win32_GUID),
("ParentLocation", Win32_PARENT_LOCATION),
("ParentIdentifier", Win32_GUID),
- ("ParentTimestamp", wintypes.ULONG),
+ ("ParentTimestamp", ctypes.wintypes.ULONG),
("VirtualStorageType", Win32_VIRTUAL_STORAGE_TYPE),
- ("ProviderSubtype", wintypes.ULONG),
- ("Is4kAligned", wintypes.BOOL),
+ ("ProviderSubtype", ctypes.wintypes.ULONG),
+ ("Is4kAligned", ctypes.wintypes.BOOL),
("PhysicalDisk", Win32_PHYSICAL_DISK),
- ("VhdPhysicalSectorSize", wintypes.ULONG),
+ ("VhdPhysicalSectorSize", ctypes.wintypes.ULONG),
("SmallestSafeVirtualSize",
- wintypes.ULARGE_INTEGER),
- ("FragmentationPercentage", wintypes.ULONG)]
+ ctypes.wintypes.ULARGE_INTEGER),
+ ("FragmentationPercentage", ctypes.wintypes.ULONG)]
class Win32_GET_VIRTUAL_DISK_INFO_PARAMETERS(ctypes.Structure):
_fields_ = [("VERSION", ctypes.wintypes.UINT),
class Win32_SET_VIRTUAL_DISK_INFO_PARAMETERS(ctypes.Structure):
_fields_ = [
- ('Version', wintypes.DWORD),
- ('ParentFilePath', wintypes.LPCWSTR)
+ ('Version', ctypes.wintypes.DWORD),
+ ('ParentFilePath', ctypes.wintypes.LPCWSTR)
]
guid.Data1 = 0xec984aec
guid.Data2 = 0xa0f9
guid.Data3 = 0x47e9
- ByteArray8 = wintypes.BYTE * 8
+ ByteArray8 = ctypes.wintypes.BYTE * 8
guid.Data4 = ByteArray8(0x90, 0x1f, 0x71, 0x41, 0x5a, 0x66, 0x34, 0x5b)
return guid
vst.DeviceId = device_id
vst.VendorId = self._msft_vendor_id
- handle = wintypes.HANDLE()
+ handle = ctypes.wintypes.HANDLE()
ret_val = virtdisk.OpenVirtualDisk(ctypes.byref(vst),
ctypes.c_wchar_p(vhd_path),
params.SourceVirtualStorageType.DeviceId = src_device_id
params.SourceVirtualStorageType.VendorId = self._msft_vendor_id
- handle = wintypes.HANDLE()
+ handle = ctypes.wintypes.HANDLE()
create_virtual_disk_flag = self.create_virtual_disk_flags.get(
new_vhd_type)
infoSize = ctypes.sizeof(virt_disk_info)
- virtdisk.GetVirtualDiskInformation.restype = wintypes.DWORD
+ virtdisk.GetVirtualDiskInformation.restype = ctypes.wintypes.DWORD
ret_val = virtdisk.GetVirtualDiskInformation(
vhd_file, ctypes.byref(ctypes.c_ulong(infoSize)),
ZFS Storage Appliance NFS Cinder Volume Driver
"""
import base64
-from datetime import datetime
+import datetime as dt
import errno
from oslo_config import cfg
def _create_snapshot_name(self):
"""Creates a snapshot name from the date and time."""
- return 'cinder-zfssa-nfs-snapshot-%s' % datetime.now().isoformat()
+ return 'cinder-zfssa-nfs-snapshot-%s' % dt.datetime.now().isoformat()
def _get_share_capacity_info(self):
"""Get available and used capacity info for the NFS share."""
from cinder.openstack.common import periodic_task
from cinder import quota
from cinder import utils
-from cinder.volume.configuration import Configuration
+from cinder.volume import configuration as config
from cinder.volume.flows.manager import create_volume
from cinder.volume.flows.manager import manage_existing
from cinder.volume import rpcapi as volume_rpcapi
from cinder.volume import utils as vol_utils
from cinder.volume import volume_types
-from eventlet.greenpool import GreenPool
+from eventlet import greenpool
LOG = logging.getLogger(__name__)
# update_service_capabilities needs service_name to be volume
super(VolumeManager, self).__init__(service_name='volume',
*args, **kwargs)
- self.configuration = Configuration(volume_manager_opts,
- config_group=service_name)
- self._tp = GreenPool()
+ self.configuration = config.Configuration(volume_manager_opts,
+ config_group=service_name)
+ self._tp = greenpool.GreenPool()
self.stats = {}
if not volume_driver:
from cinder.i18n import _LW
from cinder.openstack.common import log as logging
-from cinder.volume.targets.tgt import TgtAdm
+from cinder.volume.targets import tgt
LOG = logging.getLogger(__name__)
-class ISERTgtAdm(TgtAdm):
+class ISERTgtAdm(tgt.TgtAdm):
VERSION = '0.2'
def __init__(self, *args, **kwargs):
from oslo_config import cfg
from cinder.openstack.common import log as logging
-from cinder.volume.configuration import Configuration
+from cinder.volume import configuration
brcd_zone_opts = [
cfg.StrOpt('fc_fabric_address',
def load_fabric_configurations(fabric_names):
fabric_configs = {}
for fabric_name in fabric_names:
- config = Configuration(brcd_zone_opts, fabric_name)
+ config = configuration.Configuration(brcd_zone_opts, fabric_name)
LOG.debug("Loaded FC fabric config %s" % fabric_name)
fabric_configs[fabric_name] = config
from cinder.openstack.common import log as logging
from cinder import utils
from cinder.zonemanager.drivers.brocade import brcd_fabric_opts as fabric_opts
-import cinder.zonemanager.drivers.brocade.fc_zone_constants as ZoneConstant
-from cinder.zonemanager.fc_san_lookup_service import FCSanLookupService
+import cinder.zonemanager.drivers.brocade.fc_zone_constants as zone_constant
+from cinder.zonemanager import fc_san_lookup_service as fc_service
LOG = logging.getLogger(__name__)
-class BrcdFCSanLookupService(FCSanLookupService):
+class BrcdFCSanLookupService(fc_service.FCSanLookupService):
"""The SAN lookup service that talks to Brocade switches.
Version History:
cli_output = None
nsinfo_list = []
try:
- cli_output = self._get_switch_data(ZoneConstant.NS_SHOW)
+ cli_output = self._get_switch_data(zone_constant.NS_SHOW)
except exception.FCSanLookupServiceException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed collecting nsshow info for fabric"))
if cli_output:
nsinfo_list = self._parse_ns_output(cli_output)
try:
- cli_output = self._get_switch_data(ZoneConstant.NS_CAM_SHOW)
+ cli_output = self._get_switch_data(zone_constant.NS_CAM_SHOW)
except exception.FCSanLookupServiceException:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Failed collecting nscamshow"))
from cinder.i18n import _, _LE, _LI
from cinder.openstack.common import log as logging
from cinder.zonemanager.drivers.brocade import brcd_fabric_opts as fabric_opts
-from cinder.zonemanager.drivers.fc_zone_driver import FCZoneDriver
+from cinder.zonemanager.drivers import fc_zone_driver
LOG = logging.getLogger(__name__)
CONF.register_opts(brcd_opts, 'fc-zone-manager')
-class BrcdFCZoneDriver(FCZoneDriver):
+class BrcdFCZoneDriver(fc_zone_driver.FCZoneDriver):
"""Brocade FC zone driver implementation.
OpenStack Fibre Channel zone driver to manage FC zoning in
#
from oslo_config import cfg
-from cinder.volume.configuration import Configuration
+from cinder.volume import configuration
cisco_zone_opts = [
cfg.StrOpt('cisco_fc_fabric_address',
def load_fabric_configurations(fabric_names):
fabric_configs = {}
for fabric_name in fabric_names:
- config = Configuration(cisco_zone_opts, fabric_name)
+ config = configuration.Configuration(cisco_zone_opts, fabric_name)
fabric_configs[fabric_name] = config
return fabric_configs
from cinder import ssh_utils
from cinder import utils
from cinder.zonemanager.drivers.cisco import cisco_fabric_opts as fabric_opts
-import cinder.zonemanager.drivers.cisco.fc_zone_constants as ZoneConstant
-from cinder.zonemanager.fc_san_lookup_service import FCSanLookupService
-from cinder.zonemanager.utils import get_formatted_wwn
+import cinder.zonemanager.drivers.cisco.fc_zone_constants as zone_constant
+from cinder.zonemanager import fc_san_lookup_service as fc_service
+from cinder.zonemanager import utils as zm_utils
LOG = logging.getLogger(__name__)
-class CiscoFCSanLookupService(FCSanLookupService):
+class CiscoFCSanLookupService(fc_service.FCSanLookupService):
"""The SAN lookup service that talks to Cisco switches.
Version History:
LOG.debug("FC Fabric List: %s", fabrics)
if fabrics:
for t in target_wwn_list:
- formatted_target_list.append(get_formatted_wwn(t))
+ formatted_target_list.append(zm_utils.get_formatted_wwn(t))
for i in initiator_wwn_list:
- formatted_initiator_list.append(get_formatted_wwn(i))
+ formatted_initiator_list.append(zm_utils.get_formatted_wwn(i))
for fabric_name in fabrics:
self.switch_ip = self.fabric_configs[fabric_name].safe_get(
cli_output = None
nsinfo_list = []
try:
- cmd = ([ZoneConstant.FCNS_SHOW, fabric_vsan, ' | no-more'])
+ cmd = ([zone_constant.FCNS_SHOW, fabric_vsan, ' | no-more'])
cli_output = self._get_switch_info(cmd)
except exception.FCSanLookupServiceException:
with excutils.save_and_reraise_exception():
from cinder.i18n import _, _LE, _LI
from cinder.openstack.common import log as logging
from cinder.zonemanager.drivers.cisco import cisco_fabric_opts as fabric_opts
-from cinder.zonemanager.drivers.fc_zone_driver import FCZoneDriver
-from cinder.zonemanager.utils import get_formatted_wwn
+from cinder.zonemanager.drivers import fc_zone_driver
+from cinder.zonemanager import utils as zm_utils
LOG = logging.getLogger(__name__)
CONF.register_opts(cisco_opts, 'fc-zone-manager')
-class CiscoFCZoneDriver(FCZoneDriver):
+class CiscoFCZoneDriver(fc_zone_driver.FCZoneDriver):
"""Cisco FC zone driver implementation.
OpenStack Fibre Channel zone driver to manage FC zoning in
if zoning_policy == 'initiator-target':
for t in t_list:
target = t.lower()
- zone_members = [get_formatted_wwn(initiator),
- get_formatted_wwn(target)]
+ zone_members = [
+ zm_utils.get_formatted_wwn(initiator),
+ zm_utils.get_formatted_wwn(target)]
zone_name = (self.
configuration.cisco_zone_name_prefix
+ initiator.replace(':', '')
"Skipping zone creation %s"),
zone_name)
elif zoning_policy == 'initiator':
- zone_members = [get_formatted_wwn(initiator)]
+ zone_members = [
+ zm_utils.get_formatted_wwn(initiator)]
for t in t_list:
target = t.lower()
- zone_members.append(get_formatted_wwn(target))
+ zone_members.append(
+ zm_utils.get_formatted_wwn(target))
zone_name = self.configuration.cisco_zone_name_prefix \
+ initiator.replace(':', '')
LOG.debug("zone config from Fabric: %s", cfgmap_from_fabric)
for initiator_key in initiator_target_map.keys():
initiator = initiator_key.lower()
- formatted_initiator = get_formatted_wwn(initiator)
+ formatted_initiator = zm_utils.get_formatted_wwn(initiator)
zone_map = {}
zones_to_delete = []
t_list = initiator_target_map[initiator_key]
zone_members = [formatted_initiator]
for t in t_list:
target = t.lower()
- zone_members.append(get_formatted_wwn(target))
+ zone_members.append(
+ zm_utils.get_formatted_wwn(target))
zone_name = self.configuration.cisco_zone_name_prefix \
+ initiator.replace(':', '')
LOG.debug("Target wwn List: %s", target_wwn_list)
if len(fabrics) > 0:
for t in target_wwn_list:
- formatted_target_list.append(get_formatted_wwn(t.lower()))
+ formatted_target_list.append(
+ zm_utils.get_formatted_wwn(t.lower()))
LOG.debug("Formatted Target wwn List: %s", formatted_target_list)
for fabric_name in fabrics:
fabric_ip = self.fabric_configs[fabric_name].safe_get(
from cinder.i18n import _LI, _LW
from cinder.openstack.common import log
-from cinder.volume.configuration import Configuration
+from cinder.volume import configuration
from cinder.volume import manager
from cinder.zonemanager import fc_san_lookup_service
from cinder.zonemanager import fc_zone_manager
def create_zone_manager():
"""If zoning is enabled, build the Zone Manager."""
- config = Configuration(manager.volume_manager_opts)
+ config = configuration.Configuration(manager.volume_manager_opts)
LOG.debug("Zoning mode: %s", config.safe_get('zoning_mode'))
if config.safe_get('zoning_mode') == 'fabric':
LOG.debug("FC Zone Manager enabled.")
def create_lookup_service():
- config = Configuration(manager.volume_manager_opts)
+ config = configuration.Configuration(manager.volume_manager_opts)
LOG.debug("Zoning mode: %s", config.safe_get('zoning_mode'))
if config.safe_get('zoning_mode') == 'fabric':
LOG.debug("FC Lookup Service enabled.")
# Due to the upgrade to hacking 0.9.2 the following checking are
# ignored on purpose for the moment and should be re-enabled.
#
-# H302,H405
+# H405
-ignore = E251,H302,H402,H405,H803,H904
+ignore = E251,H402,H405,H803,H904
exclude = .git,.venv,.tox,dist,tools,doc,common,*egg,build
max-complexity=30