review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Adds the Violin Memory V7000 series FC driver.
author Ryan Lucio <rlucio@vmem.com>
Tue, 9 Jun 2015 20:17:53 +0000 (13:17 -0700)
committer Ryan Lucio <rlucio@vmem.com>
Fri, 19 Jun 2015 20:38:32 +0000 (13:38 -0700)
This driver adds cinder volume support for VMEM 7300 and 7700 disk
arrays with Fibre Channel HBAs.

DocImpact
Implements: blueprint vmem-7000-series-fc-driver
Change-Id: I516e12e699674fb0cdd9298f98d49bb14a2097ac

cinder/tests/unit/fake_vmem_client.py
cinder/tests/unit/test_v7000_common.py [new file with mode: 0644]
cinder/tests/unit/test_v7000_fcp.py [new file with mode: 0644]
cinder/volume/drivers/violin/v7000_common.py [new file with mode: 0644]
cinder/volume/drivers/violin/v7000_fcp.py [new file with mode: 0644]

index 25255a3c704652c002467729b63179659a3f89db..7b09c93fea62eda41471d715bb30d26daf974fbb 100644 (file)
@@ -21,8 +21,22 @@ import sys
 
 import mock
 
+
+# The following gymnastics to fake an exception class globally are needed
+# because we want to model certain exceptions and make them available
+# globally.  If we do not do this, the real driver's import will not see
+# our fakes.
+class NoMatchingObjectIdError(Exception):
+    pass
+
+error = mock.Mock()
+error.NoMatchingObjectIdError = NoMatchingObjectIdError
+
+core = mock.Mock()
+core.attach_mock(error, 'error')
+
 vmemclient = mock.Mock()
 vmemclient.__version__ = "unknown"
+vmemclient.attach_mock(core, 'core')
 
 sys.modules['vmemclient'] = vmemclient
 
@@ -42,4 +56,10 @@ mock_client_conf = [
     'iscsi.create_iscsi_target',
     'iscsi.delete_iscsi_target',
     'igroup',
+    'client',
+    'client.get_client_info',
+    'client.create_client',
+    'client.delete_client',
+    'adapter',
+    'adapter.get_fc_info'
 ]
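
The hunk above relies on registering a fake module in sys.modules before the
driver is ever imported, so the driver's own "import vmemclient" resolves to
the mock, while NoMatchingObjectIdError stays a real exception class that
tests can raise and catch. A minimal standalone sketch of the pattern,
assuming only the standard mock library (the trailing import and assert are
illustrative):

    import sys

    import mock

    class NoMatchingObjectIdError(Exception):
        pass

    error = mock.Mock()
    error.NoMatchingObjectIdError = NoMatchingObjectIdError
    core = mock.Mock()
    core.attach_mock(error, 'error')
    fake = mock.Mock()
    fake.attach_mock(core, 'core')

    # Any later 'import vmemclient' now yields the fake, so test and driver
    # code can reference vmemclient.core.error.NoMatchingObjectIdError.
    sys.modules['vmemclient'] = fake

    import vmemclient
    assert (vmemclient.core.error.NoMatchingObjectIdError is
            NoMatchingObjectIdError)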
diff --git a/cinder/tests/unit/test_v7000_common.py b/cinder/tests/unit/test_v7000_common.py
new file mode 100644 (file)
index 0000000..5894b64
--- /dev/null
@@ -0,0 +1,777 @@
+# Copyright 2015 Violin Memory, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+Tests for Violin Memory 7000 Series All-Flash Array Common Driver
+"""
+import math
+import mock
+
+from oslo_utils import units
+
+from cinder import context
+from cinder import exception
+from cinder import test
+from cinder.tests.unit import fake_vmem_client as vmemclient
+from cinder.volume import configuration as conf
+from cinder.volume.drivers.violin import v7000_common
+from cinder.volume import volume_types
+
+
+VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdeffedcba"
+VOLUME = {"name": "volume-" + VOLUME_ID,
+          "id": VOLUME_ID,
+          "display_name": "fake_volume",
+          "size": 2,
+          "host": "irrelevant",
+          "volume_type": None,
+          "volume_type_id": None,
+          }
+SNAPSHOT_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbb"
+SNAPSHOT = {"name": "snapshot-" + SNAPSHOT_ID,
+            "id": SNAPSHOT_ID,
+            "volume_id": VOLUME_ID,
+            "volume_name": "volume-" + VOLUME_ID,
+            "volume_size": 2,
+            "display_name": "fake_snapshot",
+            }
+SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc"
+SRC_VOL = {"name": "volume-" + SRC_VOL_ID,
+           "id": SRC_VOL_ID,
+           "display_name": "fake_src_vol",
+           "size": 2,
+           "host": "irrelevant",
+           "volume_type": None,
+           "volume_type_id": None,
+           }
+INITIATOR_IQN = "iqn.1111-22.org.debian:11:222"
+CONNECTOR = {"initiator": INITIATOR_IQN}
+
+
+class V7000CommonTestCase(test.TestCase):
+    """Test case for Violin drivers."""
+    def setUp(self):
+        super(V7000CommonTestCase, self).setUp()
+        self.conf = self.setup_configuration()
+        self.driver = v7000_common.V7000Common(self.conf)
+        self.driver.container = 'myContainer'
+        self.driver.device_id = 'ata-VIOLIN_MEMORY_ARRAY_23109R00000022'
+        self.stats = {}
+
+    def tearDown(self):
+        super(V7000CommonTestCase, self).tearDown()
+
+    def setup_configuration(self):
+        config = mock.Mock(spec=conf.Configuration)
+        config.volume_backend_name = 'v7000_common'
+        config.san_ip = '1.1.1.1'
+        config.san_login = 'admin'
+        config.san_password = ''
+        config.san_thin_provision = False
+        config.san_is_local = False
+        config.gateway_mga = '2.2.2.2'
+        config.gateway_mgb = '3.3.3.3'
+        config.use_igroups = False
+        config.violin_request_timeout = 300
+        config.container = 'myContainer'
+        return config
+
+    @mock.patch('vmemclient.open')
+    def setup_mock_client(self, _m_client, m_conf=None):
+        """Create a fake backend communication factory.
+
+        The xg-tools create a Concerto connection object (for V7000
+        devices) and return it on calls to vmemclient.open().
+        """
+        # configure the concerto object mock with defaults
+        _m_concerto = mock.Mock(name='Concerto',
+                                version='1.1.1',
+                                spec=vmemclient.mock_client_conf)
+
+        # if m_conf, clobber the defaults with it
+        if m_conf:
+            _m_concerto.configure_mock(**m_conf)
+
+        # set calls to vmemclient.open() to return this mocked concerto object
+        _m_client.return_value = _m_concerto
+
+        return _m_client
+
+    def setup_mock_concerto(self, m_conf=None):
+        """Create a fake Concerto communication object."""
+        _m_concerto = mock.Mock(name='Concerto',
+                                version='1.1.1',
+                                spec=vmemclient.mock_client_conf)
+
+        if m_conf:
+            _m_concerto.configure_mock(**m_conf)
+
+        return _m_concerto
+
+    def test_check_for_setup_error(self):
+        """No setup errors are found."""
+        self.driver.vmem_mg = self.setup_mock_concerto()
+        self.driver._is_supported_vmos_version = mock.Mock(return_value=True)
+
+        result = self.driver.check_for_setup_error()
+
+        self.driver._is_supported_vmos_version.assert_called_with(
+            self.driver.vmem_mg.version)
+        self.assertIsNone(result)
+
+    def test_create_lun(self):
+        """Lun is successfully created."""
+        response = {'success': True, 'msg': 'Create resource successfully.'}
+        size_in_mb = VOLUME['size'] * units.Ki
+
+        conf = {
+            'lun.create_lun.return_value': response,
+        }
+        self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
+        self.driver._send_cmd = mock.Mock(return_value=response)
+
+        result = self.driver._create_lun(VOLUME)
+
+        self.driver._send_cmd.assert_called_with(
+            self.driver.vmem_mg.lun.create_lun,
+            'Create resource successfully.',
+            VOLUME['id'], size_in_mb, False, False, size_in_mb,
+            storage_pool=None)
+        self.assertIsNone(result)
+
+    def test_create_dedup_lun(self):
+        """Lun is successfully created."""
+        vol = VOLUME.copy()
+        vol['size'] = 100
+        vol['volume_type_id'] = '1'
+
+        response = {'success': True, 'msg': 'Create resource successfully.'}
+        size_in_mb = vol['size'] * units.Ki
+        full_size_mb = size_in_mb
+
+        conf = {
+            'lun.create_lun.return_value': response,
+        }
+        self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
+        self.driver._send_cmd = mock.Mock(return_value=response)
+
+        # simulate extra specs of {'thin': 'true', 'dedup': 'true'}
+        self.driver._get_volume_type_extra_spec = mock.Mock(
+            return_value="True")
+
+        self.driver._get_violin_extra_spec = mock.Mock(
+            return_value=None)
+
+        result = self.driver._create_lun(vol)
+
+        self.driver._send_cmd.assert_called_with(
+            self.driver.vmem_mg.lun.create_lun,
+            'Create resource successfully.',
+            VOLUME['id'], size_in_mb / 10, True, True, full_size_mb,
+            storage_pool=None)
+        self.assertIsNone(result)
+
+    def test_fail_extend_dedup_lun(self):
+        """Volume extend fails when new size would shrink the volume."""
+        failure = exception.VolumeDriverException
+        vol = VOLUME.copy()
+        vol['volume_type_id'] = '1'
+
+        size_in_mb = vol['size'] * units.Ki
+
+        self.driver.vmem_mg = self.setup_mock_concerto()
+
+        # simulate extra specs of {'thin': 'true', 'dedup': 'true'}
+        self.driver._get_volume_type_extra_spec = mock.Mock(
+            return_value="True")
+
+        self.assertRaises(failure, self.driver._extend_lun,
+                          vol, size_in_mb)
+
+    def test_create_non_dedup_lun(self):
+        """Lun is successfully created."""
+        vol = VOLUME.copy()
+        vol['size'] = 100
+        vol['volume_type_id'] = '1'
+
+        response = {'success': True, 'msg': 'Create resource successfully.'}
+        size_in_mb = vol['size'] * units.Ki
+        full_size_mb = size_in_mb
+
+        conf = {
+            'lun.create_lun.return_value': response,
+        }
+        self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
+        self.driver._send_cmd = mock.Mock(return_value=response)
+
+        # simulate extra specs of {'thin': 'false', 'dedup': 'false'}
+        self.driver._get_volume_type_extra_spec = mock.Mock(
+            return_value="False")
+
+        self.driver._get_violin_extra_spec = mock.Mock(
+            return_value=None)
+
+        result = self.driver._create_lun(vol)
+
+        self.driver._send_cmd.assert_called_with(
+            self.driver.vmem_mg.lun.create_lun,
+            'Create resource successfully.',
+            VOLUME['id'], size_in_mb, False, False, full_size_mb,
+            storage_pool=None)
+        self.assertIsNone(result)
+
+    def test_create_lun_fails(self):
+        """Array returns error that the lun already exists."""
+        response = {'success': False,
+                    'msg': 'Duplicate Virtual Device name. Error: 0x90010022'}
+
+        conf = {
+            'lun.create_lun.return_value': response,
+        }
+        self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
+        self.driver._send_cmd = mock.Mock(return_value=response)
+
+        self.assertIsNone(self.driver._create_lun(VOLUME))
+
+    def test_create_lun_on_a_storage_pool(self):
+        """Lun is successfully created."""
+        vol = VOLUME.copy()
+        vol['size'] = 100
+        vol['volume_type_id'] = '1'
+        response = {'success': True, 'msg': 'Create resource successfully.'}
+        size_in_mb = vol['size'] * units.Ki
+        full_size_mb = size_in_mb
+
+        conf = {
+            'lun.create_lun.return_value': response,
+        }
+        self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
+        self.driver._send_cmd = mock.Mock(return_value=response)
+        self.driver._get_volume_type_extra_spec = mock.Mock(
+            return_value="False")
+
+        # simulate extra specs of {'storage_pool': 'StoragePool'}
+        self.driver._get_violin_extra_spec = mock.Mock(
+            return_value="StoragePool")
+
+        result = self.driver._create_lun(vol)
+
+        self.driver._send_cmd.assert_called_with(
+            self.driver.vmem_mg.lun.create_lun,
+            'Create resource successfully.',
+            VOLUME['id'], size_in_mb, False, False, full_size_mb,
+            storage_pool="StoragePool")
+        self.assertIsNone(result)
+
+    def test_delete_lun(self):
+        """Lun is deleted successfully."""
+        response = {'success': True, 'msg': 'Delete resource successfully'}
+        success_msgs = ['Delete resource successfully', '']
+
+        conf = {
+            'lun.delete_lun.return_value': response,
+        }
+        self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
+        self.driver._send_cmd = mock.Mock(return_value=response)
+        self.driver._delete_lun_snapshot_bookkeeping = mock.Mock()
+
+        result = self.driver._delete_lun(VOLUME)
+
+        self.driver._send_cmd.assert_called_with(
+            self.driver.vmem_mg.lun.delete_lun,
+            success_msgs, VOLUME['id'], True)
+        self.driver._delete_lun_snapshot_bookkeeping.assert_called_with(
+            VOLUME['id'])
+
+        self.assertIsNone(result)
+
+    # TODO(rlucio) More delete lun failure cases to be added after
+    # collecting the possible responses from Concerto
+
+    def test_extend_lun(self):
+        """Volume extend completes successfully."""
+        new_volume_size = 10
+        change_in_size_mb = (new_volume_size - VOLUME['size']) * units.Ki
+
+        response = {'success': True, 'message': 'Expand resource successfully'}
+
+        conf = {
+            'lun.extend_lun.return_value': response,
+        }
+        self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
+        self.driver._send_cmd = mock.Mock(return_value=response)
+
+        result = self.driver._extend_lun(VOLUME, new_volume_size)
+
+        self.driver._send_cmd.assert_called_with(
+            self.driver.vmem_mg.lun.extend_lun,
+            response['message'], VOLUME['id'], change_in_size_mb)
+        self.assertIsNone(result)
+
+    def test_extend_lun_new_size_is_too_small(self):
+        """Volume extend fails when new size would shrink the volume."""
+        new_volume_size = 0
+        change_in_size_mb = (new_volume_size - VOLUME['size']) * units.Ki
+
+        response = {'success': False, 'msg': 'Invalid size. Error: 0x0902000c'}
+        failure = exception.ViolinBackendErr
+
+        conf = {
+            'lun.extend_lun.return_value': response,
+        }
+        self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
+        self.driver._send_cmd = mock.Mock(side_effect=failure(message='fail'))
+
+        self.assertRaises(failure, self.driver._extend_lun,
+                          VOLUME, change_in_size_mb)
+
+    def test_create_volume_from_snapshot(self):
+        """Create a new cinder volume from a given snapshot of a lun."""
+        object_id = '12345'
+        vdev_id = 11111
+        response = {'success': True,
+                    'object_id': object_id,
+                    'msg': 'Copy TimeMark successfully.'}
+        lun_info = {'virtualDeviceID': vdev_id}
+        compressed_snap_id = 'abcdabcd1234abcd1234abcdeffedcbb'
+
+        conf = {
+            'lun.copy_snapshot_to_new_lun.return_value': response,
+        }
+        self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
+        self.driver._compress_snapshot_id = mock.Mock(
+            return_value=compressed_snap_id)
+        self.driver.vmem_mg.lun.get_lun_info = mock.Mock(return_value=lun_info)
+        self.driver._wait_for_lun_or_snap_copy = mock.Mock()
+
+        result = self.driver._create_volume_from_snapshot(SNAPSHOT, VOLUME)
+
+        self.driver.vmem_mg.lun.copy_snapshot_to_new_lun.assert_called_with(
+            source_lun=SNAPSHOT['volume_id'],
+            source_snapshot_comment=compressed_snap_id,
+            destination=VOLUME['id'], storage_pool=None)
+        self.driver.vmem_mg.lun.get_lun_info.assert_called_with(
+            object_id=object_id)
+        self.driver._wait_for_lun_or_snap_copy.assert_called_with(
+            SNAPSHOT['volume_id'], dest_vdev_id=vdev_id)
+
+        self.assertIsNone(result)
+
+    def test_create_volume_from_snapshot_on_a_storage_pool(self):
+        """Create a new cinder volume from a given snapshot of a lun."""
+        dest_vol = VOLUME.copy()
+        dest_vol['size'] = 100
+        dest_vol['volume_type_id'] = '1'
+        object_id = '12345'
+        vdev_id = 11111
+        response = {'success': True,
+                    'object_id': object_id,
+                    'msg': 'Copy TimeMark successfully.'}
+        lun_info = {'virtualDeviceID': vdev_id}
+        compressed_snap_id = 'abcdabcd1234abcd1234abcdeffedcbb'
+
+        conf = {
+            'lun.copy_snapshot_to_new_lun.return_value': response,
+        }
+        self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
+        self.driver._compress_snapshot_id = mock.Mock(
+            return_value=compressed_snap_id)
+        self.driver.vmem_mg.lun.get_lun_info = mock.Mock(return_value=lun_info)
+        self.driver._wait_for_lun_or_snap_copy = mock.Mock()
+
+        # simulate extra specs of {'storage_pool': 'StoragePool'}
+        self.driver._get_violin_extra_spec = mock.Mock(
+            return_value="StoragePool")
+
+        result = self.driver._create_volume_from_snapshot(SNAPSHOT, dest_vol)
+
+        self.assertIsNone(result)
+
+    def test_create_volume_from_snapshot_fails(self):
+        """Array returns error that the lun already exists."""
+        response = {'success': False,
+                    'msg': 'Duplicate Virtual Device name. Error: 0x90010022'}
+        compressed_snap_id = 'abcdabcd1234abcd1234abcdeffedcbb'
+        failure = exception.ViolinBackendErrExists
+
+        conf = {
+            'lun.copy_snapshot_to_new_lun.return_value': response,
+        }
+        self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
+        self.driver._compress_snapshot_id = mock.Mock(
+            return_value=compressed_snap_id)
+
+        self.driver._send_cmd = mock.Mock(side_effect=failure(message='fail'))
+
+        self.assertRaises(failure, self.driver._create_volume_from_snapshot,
+                          SNAPSHOT, VOLUME)
+
+    def test_create_lun_from_lun(self):
+        """lun full clone to new volume completes successfully."""
+        object_id = '12345'
+        response = {'success': True,
+                    'object_id': object_id,
+                    'msg': 'Copy Snapshot resource successfully'}
+
+        conf = {
+            'lun.copy_lun_to_new_lun.return_value': response,
+        }
+        self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
+        self.driver._ensure_snapshot_resource_area = mock.Mock()
+        self.driver._wait_for_lun_or_snap_copy = mock.Mock()
+
+        result = self.driver._create_lun_from_lun(SRC_VOL, VOLUME)
+
+        self.driver._ensure_snapshot_resource_area.assert_called_with(
+            SRC_VOL['id'])
+        self.driver.vmem_mg.lun.copy_lun_to_new_lun.assert_called_with(
+            source=SRC_VOL['id'], destination=VOLUME['id'], storage_pool=None)
+        self.driver._wait_for_lun_or_snap_copy.assert_called_with(
+            SRC_VOL['id'], dest_obj_id=object_id)
+
+        self.assertIsNone(result)
+
+    def test_create_lun_from_lun_on_a_storage_pool(self):
+        """Lun full clone to new volume on a storage pool succeeds."""
+        dest_vol = VOLUME.copy()
+        dest_vol['size'] = 100
+        dest_vol['volume_type_id'] = '1'
+        object_id = '12345'
+        response = {'success': True,
+                    'object_id': object_id,
+                    'msg': 'Copy Snapshot resource successfully'}
+
+        conf = {
+            'lun.copy_lun_to_new_lun.return_value': response,
+        }
+        self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
+        self.driver._ensure_snapshot_resource_area = mock.Mock()
+        self.driver._wait_for_lun_or_snap_copy = mock.Mock()
+
+        # simulate extra specs of {'storage_pool': 'StoragePool'}
+        self.driver._get_violin_extra_spec = mock.Mock(
+            return_value="StoragePool")
+
+        result = self.driver._create_lun_from_lun(SRC_VOL, dest_vol)
+
+        self.driver._ensure_snapshot_resource_area.assert_called_with(
+            SRC_VOL['id'])
+        self.driver.vmem_mg.lun.copy_lun_to_new_lun.assert_called_with(
+            source=SRC_VOL['id'], destination=dest_vol['id'],
+            storage_pool="StoragePool")
+        self.driver._wait_for_lun_or_snap_copy.assert_called_with(
+            SRC_VOL['id'], dest_obj_id=object_id)
+
+        self.assertIsNone(result)
+
+    def test_create_lun_from_lun_fails(self):
+        """lun full clone to new volume completes successfully."""
+        failure = exception.ViolinBackendErr
+        response = {'success': False,
+                    'msg': 'Snapshot Resource is not created '
+                    'for this virtual device. Error: 0x0901008c'}
+
+        conf = {
+            'lun.copy_lun_to_new_lun.return_value': response,
+        }
+        self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
+        self.driver._ensure_snapshot_resource_area = mock.Mock()
+        self.driver._send_cmd = mock.Mock(side_effect=failure(message='fail'))
+
+        self.assertRaises(failure, self.driver._create_lun_from_lun,
+                          SRC_VOL, VOLUME)
+
+    def test_send_cmd(self):
+        """Command callback completes successfully."""
+        success_msg = 'success'
+        request_args = ['arg1', 'arg2', 'arg3']
+        response = {'success': True, 'msg': 'Operation successful'}
+
+        request_func = mock.Mock(return_value=response)
+
+        result = self.driver._send_cmd(request_func, success_msg, request_args)
+
+        self.assertEqual(response, result)
+
+    def test_send_cmd_request_timed_out(self):
+        """The callback retry timeout hits immediately."""
+        failure = exception.ViolinRequestRetryTimeout
+        success_msg = 'success'
+        request_args = ['arg1', 'arg2', 'arg3']
+        self.conf.violin_request_timeout = 0
+
+        request_func = mock.Mock()
+
+        self.assertRaises(failure, self.driver._send_cmd,
+                          request_func, success_msg, request_args)
+
+    def test_send_cmd_response_has_no_message(self):
+        """The callback returns no message on the first call."""
+        success_msg = 'success'
+        request_args = ['arg1', 'arg2', 'arg3']
+        response1 = {'success': True, 'msg': None}
+        response2 = {'success': True, 'msg': 'success'}
+
+        request_func = mock.Mock(side_effect=[response1, response2])
+
+        result = self.driver._send_cmd(request_func, success_msg,
+                                       request_args)
+        self.assertEqual(response2, result)
+
+    def test_check_error_code(self):
+        """Return an exception for a valid error code."""
+        failure = exception.ViolinBackendErr
+        response = {'success': False, 'msg': 'Error: 0x90000000'}
+        self.assertRaises(failure, self.driver._check_error_code,
+                          response)
+
+    def test_check_error_code_non_fatal_error(self):
+        """Returns no exception for a non-fatal error code."""
+        response = {'success': False, 'msg': 'Error: 0x9001003c'}
+        self.assertIsNone(self.driver._check_error_code(response))
+
+    def test_compress_snapshot_id(self):
+        test_snap_id = "12345678-abcd-1234-cdef-0123456789ab"
+        expected = "12345678abcd1234cdef0123456789ab"
+
+        self.assertEqual(32, len(expected))
+        result = self.driver._compress_snapshot_id(test_snap_id)
+        self.assertEqual(expected, result)
+
+    def test_ensure_snapshot_resource_area(self):
+        result_dict = {'success': True, 'res': 'Successful'}
+
+        self.driver.vmem_mg = self.setup_mock_concerto()
+        snap = self.driver.vmem_mg.snapshot
+        snap.lun_has_a_snapshot_resource = mock.Mock(return_value=False)
+        snap.create_snapshot_resource = mock.Mock(return_value=result_dict)
+
+        with mock.patch('cinder.db.sqlalchemy.api.volume_get',
+                        return_value=VOLUME):
+            result = self.driver._ensure_snapshot_resource_area(VOLUME_ID)
+
+        self.assertIsNone(result)
+        snap.lun_has_a_snapshot_resource.assert_called_with(lun=VOLUME_ID)
+        snap.create_snapshot_resource.assert_called_with(
+            lun=VOLUME_ID,
+            size=int(math.ceil(0.2 * (VOLUME['size'] * 1024))),
+            enable_notification=False,
+            policy=v7000_common.CONCERTO_DEFAULT_SRA_POLICY,
+            enable_expansion=
+            v7000_common.CONCERTO_DEFAULT_SRA_ENABLE_EXPANSION,
+            expansion_threshold=
+            v7000_common.CONCERTO_DEFAULT_SRA_EXPANSION_THRESHOLD,
+            expansion_increment=
+            v7000_common.CONCERTO_DEFAULT_SRA_EXPANSION_INCREMENT,
+            expansion_max_size=
+            v7000_common.CONCERTO_DEFAULT_SRA_EXPANSION_MAX_SIZE,
+            enable_shrink=v7000_common.CONCERTO_DEFAULT_SRA_ENABLE_SHRINK,
+            storage_pool=None)
+
+    def test_ensure_snapshot_resource_area_with_storage_pool(self):
+        dest_vol = VOLUME.copy()
+        dest_vol['size'] = 2
+        dest_vol['volume_type_id'] = '1'
+
+        result_dict = {'success': True, 'res': 'Successful'}
+
+        self.driver.vmem_mg = self.setup_mock_concerto()
+        snap = self.driver.vmem_mg.snapshot
+        snap.lun_has_a_snapshot_resource = mock.Mock(return_value=False)
+        snap.create_snapshot_resource = mock.Mock(return_value=result_dict)
+
+        # simulate extra specs of {'storage_pool': 'StoragePool'}
+        self.driver._get_violin_extra_spec = mock.Mock(
+            return_value="StoragePool")
+
+        with mock.patch('cinder.db.sqlalchemy.api.volume_get',
+                        return_value=dest_vol):
+            result = self.driver._ensure_snapshot_resource_area(VOLUME_ID)
+
+        self.assertIsNone(result)
+        snap.lun_has_a_snapshot_resource.assert_called_with(lun=VOLUME_ID)
+        snap.create_snapshot_resource.assert_called_with(
+            lun=VOLUME_ID,
+            size=int(math.ceil(0.2 * (VOLUME['size'] * 1024))),
+            enable_notification=False,
+            policy=v7000_common.CONCERTO_DEFAULT_SRA_POLICY,
+            enable_expansion=
+            v7000_common.CONCERTO_DEFAULT_SRA_ENABLE_EXPANSION,
+            expansion_threshold=
+            v7000_common.CONCERTO_DEFAULT_SRA_EXPANSION_THRESHOLD,
+            expansion_increment=
+            v7000_common.CONCERTO_DEFAULT_SRA_EXPANSION_INCREMENT,
+            expansion_max_size=
+            v7000_common.CONCERTO_DEFAULT_SRA_EXPANSION_MAX_SIZE,
+            enable_shrink=v7000_common.CONCERTO_DEFAULT_SRA_ENABLE_SHRINK,
+            storage_pool="StoragePool")
+
+    def test_ensure_snapshot_resource_policy(self):
+        result_dict = {'success': True, 'res': 'Successful'}
+
+        self.driver.vmem_mg = self.setup_mock_concerto()
+
+        snap = self.driver.vmem_mg.snapshot
+        snap.lun_has_a_snapshot_policy = mock.Mock(return_value=False)
+        snap.create_snapshot_policy = mock.Mock(return_value=result_dict)
+
+        result = self.driver._ensure_snapshot_policy(VOLUME_ID)
+        self.assertIsNone(result)
+        snap.lun_has_a_snapshot_policy.assert_called_with(lun=VOLUME_ID)
+
+        snap.create_snapshot_policy.assert_called_with(
+            lun=VOLUME_ID,
+            max_snapshots=v7000_common.CONCERTO_DEFAULT_POLICY_MAX_SNAPSHOTS,
+            enable_replication=False,
+            enable_snapshot_schedule=False,
+            enable_cdp=False,
+            retention_mode=v7000_common.CONCERTO_DEFAULT_POLICY_RETENTION_MODE)
+
+    def test_delete_lun_snapshot_bookkeeping(self):
+        result_dict = {'success': True, 'res': 'Successful'}
+
+        self.driver.vmem_mg = self.setup_mock_concerto()
+        snap = self.driver.vmem_mg.snapshot
+        snap.get_snapshots = mock.Mock(
+            return_value=[],
+            side_effect=vmemclient.core.error.NoMatchingObjectIdError)
+        snap.delete_snapshot_policy = mock.Mock(return_value=result_dict)
+        snap.delete_snapshot_resource = mock.Mock()
+
+        result = self.driver._delete_lun_snapshot_bookkeeping(
+            volume_id=VOLUME_ID)
+
+        self.assertIsNone(result)
+
+        snap.get_snapshots.assert_called_with(VOLUME_ID)
+        snap.delete_snapshot_policy.assert_called_with(lun=VOLUME_ID)
+        snap.delete_snapshot_resource.assert_called_with(lun=VOLUME_ID)
+
+    def test_create_lun_snapshot(self):
+        response = {'success': True, 'msg': 'Create TimeMark successfully'}
+
+        self.driver.vmem_mg = self.setup_mock_concerto()
+        self.driver._ensure_snapshot_resource_area = (
+            mock.Mock(return_value=True))
+        self.driver._ensure_snapshot_policy = mock.Mock(return_value=True)
+        self.driver._send_cmd = mock.Mock(return_value=response)
+
+        with mock.patch('cinder.db.sqlalchemy.api.volume_get',
+                        return_value=VOLUME):
+            result = self.driver._create_lun_snapshot(SNAPSHOT)
+
+        self.assertIsNone(result)
+
+        self.driver._ensure_snapshot_resource_area.assert_called_with(
+            VOLUME_ID)
+        self.driver._ensure_snapshot_policy.assert_called_with(VOLUME_ID)
+        self.driver._send_cmd.assert_called_with(
+            self.driver.vmem_mg.snapshot.create_lun_snapshot,
+            'Create TimeMark successfully',
+            lun=VOLUME_ID,
+            comment=self.driver._compress_snapshot_id(SNAPSHOT_ID),
+            priority=v7000_common.CONCERTO_DEFAULT_PRIORITY,
+            enable_notification=False)
+
+    def test_delete_lun_snapshot(self):
+        response = {'success': True, 'msg': 'Delete TimeMark successfully'}
+        compressed_snap_id = 'abcdabcd1234abcd1234abcdeffedcbb'
+
+        self.driver.vmem_mg = self.setup_mock_concerto()
+        self.driver._send_cmd = mock.Mock(return_value=response)
+        self.driver._compress_snapshot_id = mock.Mock(
+            return_value=compressed_snap_id)
+
+        self.assertIsNone(self.driver._delete_lun_snapshot(SNAPSHOT))
+
+        self.driver._send_cmd.assert_called_with(
+            self.driver.vmem_mg.snapshot.delete_lun_snapshot,
+            'Delete TimeMark successfully',
+            lun=VOLUME_ID,
+            comment=compressed_snap_id)
+
+    def test_wait_for_lun_or_snap_copy_completes_for_snap(self):
+        """waiting for a snapshot to copy succeeds."""
+        vdev_id = 11111
+        response = (vdev_id, None, 100)
+
+        conf = {
+            'snapshot.get_snapshot_copy_status.return_value': response,
+        }
+        self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
+
+        result = self.driver._wait_for_lun_or_snap_copy(
+            SRC_VOL['id'], dest_vdev_id=vdev_id)
+
+        (self.driver.vmem_mg.snapshot.get_snapshot_copy_status.
+         assert_called_with(SRC_VOL['id']))
+        self.assertTrue(result)
+
+    def test_wait_for_lun_or_snap_copy_completes_for_lun(self):
+        """waiting for a lun to copy succeeds."""
+        object_id = '12345'
+        response = (object_id, None, 100)
+
+        conf = {
+            'lun.get_lun_copy_status.return_value': response,
+        }
+        self.driver.vmem_mg = self.setup_mock_concerto(m_conf=conf)
+
+        result = self.driver._wait_for_lun_or_snap_copy(
+            SRC_VOL['id'], dest_obj_id=object_id)
+
+        self.driver.vmem_mg.lun.get_lun_copy_status.assert_called_with(
+            SRC_VOL['id'])
+        self.assertTrue(result)
+
+    @mock.patch.object(context, 'get_admin_context')
+    @mock.patch.object(volume_types, 'get_volume_type')
+    def test_get_volume_type_extra_spec(self,
+                                        m_get_volume_type,
+                                        m_get_admin_context):
+        """Volume_type extra specs are found successfully."""
+        vol = VOLUME.copy()
+        vol['volume_type_id'] = 1
+        volume_type = {'extra_specs': {'override:test_key': 'test_value'}}
+
+        m_get_admin_context.return_value = None
+        m_get_volume_type.return_value = volume_type
+
+        result = self.driver._get_volume_type_extra_spec(vol, 'test_key')
+
+        m_get_admin_context.assert_called_with()
+        m_get_volume_type.assert_called_with(None, vol['volume_type_id'])
+        self.assertEqual('test_value', result)
+
+    @mock.patch.object(context, 'get_admin_context')
+    @mock.patch.object(volume_types, 'get_volume_type')
+    def test_get_violin_extra_spec(self,
+                                   m_get_volume_type,
+                                   m_get_admin_context):
+        """Violin extra specs are found successfully."""
+        vol = VOLUME.copy()
+        vol['volume_type_id'] = 1
+        volume_type = {'extra_specs': {'violin:test_key': 'test_value'}}
+
+        m_get_admin_context.return_value = None
+        m_get_volume_type.return_value = volume_type
+
+        result = self.driver._get_violin_extra_spec(vol, 'test_key')
+
+        m_get_admin_context.assert_called_with()
+        m_get_volume_type.assert_called_with(None, vol['volume_type_id'])
+        self.assertEqual('test_value', result)
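
Taken together, the three _send_cmd tests above pin down the retry contract
the common driver is expected to honor: a response whose msg contains the
expected success message is returned, a response with no msg is retried, and
the whole loop is bounded by violin_request_timeout (a timeout of 0 fails
immediately). A rough sketch of a loop satisfying that contract, with
error-code checking omitted and RetryTimeout standing in for
exception.ViolinRequestRetryTimeout (the real driver may be implemented
differently, e.g. with the loopingcall helper it imports):

    import time

    class RetryTimeout(Exception):
        # Stand-in for exception.ViolinRequestRetryTimeout.
        pass

    def send_cmd(request_func, success_msg, request_args, timeout=300):
        # Call request_func until its response carries the expected message,
        # as asserted by test_send_cmd, test_send_cmd_request_timed_out and
        # test_send_cmd_response_has_no_message.
        deadline = time.time() + timeout
        while time.time() < deadline:
            response = request_func(*request_args)
            # A missing message means the backend is not done yet; retry.
            if response.get('msg') and success_msg in response['msg']:
                return response
            time.sleep(1)
        raise RetryTimeout()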
diff --git a/cinder/tests/unit/test_v7000_fcp.py b/cinder/tests/unit/test_v7000_fcp.py
new file mode 100644 (file)
index 0000000..4f12eea
--- /dev/null
@@ -0,0 +1,563 @@
+# Copyright 2015 Violin Memory, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+Tests for Violin Memory 7000 Series All-Flash Array Fibre Channel Driver
+"""
+
+import mock
+
+from cinder import exception
+from cinder import test
+from cinder.tests.unit import fake_vmem_client as vmemclient
+from cinder.volume import configuration as conf
+from cinder.volume.drivers.violin import v7000_common
+from cinder.volume.drivers.violin import v7000_fcp
+
+VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdeffedcba"
+VOLUME = {
+    "name": "volume-" + VOLUME_ID,
+    "id": VOLUME_ID,
+    "display_name": "fake_volume",
+    "size": 2,
+    "host": "myhost",
+    "volume_type": None,
+    "volume_type_id": None,
+}
+SNAPSHOT_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbb"
+SNAPSHOT = {
+    "name": "snapshot-" + SNAPSHOT_ID,
+    "id": SNAPSHOT_ID,
+    "volume_id": VOLUME_ID,
+    "volume_name": "volume-" + VOLUME_ID,
+    "volume_size": 2,
+    "display_name": "fake_snapshot",
+    "volume": VOLUME,
+}
+SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc"
+SRC_VOL = {
+    "name": "volume-" + SRC_VOL_ID,
+    "id": SRC_VOL_ID,
+    "display_name": "fake_src_vol",
+    "size": 2,
+    "host": "myhost",
+    "volume_type": None,
+    "volume_type_id": None,
+}
+INITIATOR_IQN = "iqn.1111-22.org.debian:11:222"
+CONNECTOR = {
+    "initiator": INITIATOR_IQN,
+    "host": "irrelevant",
+    'wwpns': ['50014380186b3f65', '50014380186b3f67'],
+}
+FC_TARGET_WWPNS = [
+    '31000024ff45fb22', '21000024ff45fb23',
+    '51000024ff45f1be', '41000024ff45f1bf'
+]
+FC_INITIATOR_WWPNS = [
+    '50014380186b3f65', '50014380186b3f67'
+]
+FC_FABRIC_MAP = {
+    'fabricA':
+    {'target_port_wwn_list': [FC_TARGET_WWPNS[0], FC_TARGET_WWPNS[1]],
+     'initiator_port_wwn_list': [FC_INITIATOR_WWPNS[0]]},
+    'fabricB':
+    {'target_port_wwn_list': [FC_TARGET_WWPNS[2], FC_TARGET_WWPNS[3]],
+     'initiator_port_wwn_list': [FC_INITIATOR_WWPNS[1]]}
+}
+FC_INITIATOR_TARGET_MAP = {
+    FC_INITIATOR_WWPNS[0]: [FC_TARGET_WWPNS[0], FC_TARGET_WWPNS[1]],
+    FC_INITIATOR_WWPNS[1]: [FC_TARGET_WWPNS[2], FC_TARGET_WWPNS[3]]
+}
+
+PHY_DEVICES_RESPONSE = {
+    'data':
+    {'physical_devices':
+        [{'availsize': 1099504287744,
+          'availsize_mb': 524284,
+          'category': 'Virtual Device',
+          'connection_type': 'block',
+          'firmware': 'v1.0',
+          'guid': '3cc4d6dd-166d-77d2-4967-00005463f597',
+          'inquiry_string': '000002122b000032BKSC    OTHDISK-MFCN01  v1.0',
+          'is_foreign': True,
+          'name': 'BKSC:OTHDISK-MFCN01.000',
+          'object_id': '84b834fb-1f4d-5d3b-b7ae-5796f9868151',
+          'owner': 'google-public-dns-a.google.com',
+          'pool': None,
+          'product': 'OTHDISK-MFCN01',
+          'scsi_address':
+          {'adapter': '98',
+           'channel': '0',
+           'id': '0',
+           'lun': '0',
+           'object_id': '6e0106fc-9c1c-52a2-95c9-396b7a653ac1'},
+          'size': 1099504287744,
+          'size_mb': 1048569,
+          'type': 'Direct-Access',
+          'usedsize': 0,
+          'usedsize_mb': 0,
+          'vendor': 'BKSC',
+          'wwid': 'BKSC    OTHDISK-MFCN01  v1.0-0-0-00'},
+         {'availsize': 1099504287744,
+          'availsize_mb': 524284,
+          'category': 'Virtual Device',
+          'connection_type': 'block',
+          'firmware': 'v1.0',
+          'guid': '283b2694-192b-4745-6768-00005463f673',
+          'inquiry_string': '000002122b000032BKSC    OTHDISK-MFCN08  v1.0',
+          'is_foreign': False,
+          'name': 'BKSC:OTHDISK-MFCN08.000',
+          'object_id': '8555b888-bf43-5083-a433-f0c7b0282370',
+          'owner': 'google-public-dns-a.google.com',
+          'pool':
+          {'name': 'mga-pool',
+           'object_id': '0818d3de-4437-535f-9cac-cc100a2c9313'},
+          'product': 'OTHDISK-MFCN08',
+          'scsi_address':
+          {'adapter': '98',
+           'channel': '0',
+           'id': '11',
+           'lun': '0',
+           'object_id': '6e0106fc-9c1c-52a2-95c9-396b7a653ac1'},
+          'size': 1099504287744,
+          'size_mb': 1048569,
+          'type': 'Direct-Access',
+          'usedsize': 0,
+          'usedsize_mb': 0,
+          'vendor': 'BKSC',
+          'wwid': 'BKSC    OTHDISK-MFCN08  v1.0-0-0-00'},
+         {'availsize': 1099504287744,
+          'availsize_mb': 1048569,
+          'category': 'Virtual Device',
+          'connection_type': 'block',
+          'firmware': 'v1.0',
+          'guid': '7f47db19-019c-707d-0df1-00005463f949',
+          'inquiry_string': '000002122b000032BKSC    OTHDISK-MFCN09  v1.0',
+          'is_foreign': False,
+          'name': 'BKSC:OTHDISK-MFCN09.000',
+          'object_id': '62a98898-f8b8-5837-af2b-764f5a72e291',
+          'owner': 'a.b.c.d',
+          'pool':
+          {'name': 'mga-pool',
+           'object_id': '0818d3de-4437-535f-9cac-cc100a2c9313'},
+          'product': 'OTHDISK-MFCN09',
+          'scsi_address':
+          {'adapter': '98',
+           'channel': '0',
+           'id': '12',
+           'lun': '0',
+           'object_id': '6e0106fc-9c1c-52a2-95c9-396b7a653ac1'},
+          'size': 1099504287744,
+          'size_mb': 524284,
+          'type': 'Direct-Access',
+          'usedsize': 0,
+          'usedsize_mb': 0,
+          'vendor': 'BKSC',
+          'wwid': 'BKSC    OTHDISK-MFCN09  v1.0-0-0-00'}],
+        'total_physical_devices': 3},
+    'msg': 'Successful',
+    'success': True
+}
+
+# The FC_INFO dict returned by the backend is keyed on the object_id of
+# each FC adapter, and the values are lists of the adapter's wwpns.
+FC_INFO = {
+    '1a3cdb6a-383d-5ba6-a50b-4ba598074510': ['2100001b9745e25e'],
+    '4a6bc10a-5547-5cc0-94f2-76222a8f8dff': ['2100001b9745e230'],
+    'b21bfff5-d89e-51ff-9920-d990a061d722': ['2100001b9745e25f'],
+    'b508cc6b-f78a-51f9-81cf-47c1aaf53dd1': ['2100001b9745e231']
+}
+
+CLIENT_INFO = {
+    'FCPolicy':
+    {'AS400enabled': False,
+     'VSAenabled': False,
+     'initiatorWWPNList': ['50-01-43-80-18-6b-3f-66',
+                           '50-01-43-80-18-6b-3f-64']},
+    'FibreChannelDevices':
+    [{'access': 'ReadWrite',
+      'id': 'v0000004',
+      'initiatorWWPN': '*',
+      'lun': '8',
+      'name': 'abcdabcd-1234-abcd-1234-abcdeffedcba',
+      'sizeMB': 10240,
+      'targetWWPN': '*',
+      'type': 'SAN'}]
+}
+
+CLIENT_INFO1 = {
+    'FCPolicy':
+    {'AS400enabled': False,
+     'VSAenabled': False,
+     'initiatorWWPNList': ['50-01-43-80-18-6b-3f-66',
+                           '50-01-43-80-18-6b-3f-64']},
+    'FibreChannelDevices': []
+}
+
+
+class V7000FCPDriverTestCase(test.TestCase):
+    """Test cases for VMEM FCP driver."""
+    def setUp(self):
+        super(V7000FCPDriverTestCase, self).setUp()
+        self.conf = self.setup_configuration()
+        self.driver = v7000_fcp.V7000FCPDriver(configuration=self.conf)
+        self.driver.common.container = 'myContainer'
+        self.driver.device_id = 'ata-VIOLIN_MEMORY_ARRAY_23109R00000022'
+        self.driver.gateway_fc_wwns = FC_TARGET_WWPNS
+        self.stats = {}
+        self.driver.set_initialized()
+
+    def tearDown(self):
+        super(V7000FCPDriverTestCase, self).tearDown()
+
+    def setup_configuration(self):
+        config = mock.Mock(spec=conf.Configuration)
+        config.volume_backend_name = 'v7000_fcp'
+        config.san_ip = '8.8.8.8'
+        config.san_login = 'admin'
+        config.san_password = ''
+        config.san_thin_provision = False
+        config.san_is_local = False
+        config.request_timeout = 300
+        config.container = 'myContainer'
+        return config
+
+    def setup_mock_concerto(self, m_conf=None):
+        """Create a fake Concerto communication object."""
+        _m_concerto = mock.Mock(name='Concerto',
+                                version='1.1.1',
+                                spec=vmemclient.mock_client_conf)
+
+        if m_conf:
+            _m_concerto.configure_mock(**m_conf)
+
+        return _m_concerto
+
+    @mock.patch.object(v7000_common.V7000Common, 'check_for_setup_error')
+    def test_check_for_setup_error(self, m_setup_func):
+        """No setup errors are found."""
+        result = self.driver.check_for_setup_error()
+        m_setup_func.assert_called_with()
+        self.assertIsNone(result)
+
+    @mock.patch.object(v7000_common.V7000Common, 'check_for_setup_error')
+    def test_check_for_setup_error_no_wwn_config(self, m_setup_func):
+        """No wwns were found during setup."""
+        self.driver.gateway_fc_wwns = []
+        failure = exception.ViolinInvalidBackendConfig
+        self.assertRaises(failure, self.driver.check_for_setup_error)
+
+    def test_create_volume(self):
+        """Volume created successfully."""
+        self.driver.common._create_lun = mock.Mock()
+
+        result = self.driver.create_volume(VOLUME)
+
+        self.driver.common._create_lun.assert_called_with(VOLUME)
+        self.assertIsNone(result)
+
+    def test_create_volume_from_snapshot(self):
+        self.driver.common._create_volume_from_snapshot = mock.Mock()
+
+        result = self.driver.create_volume_from_snapshot(VOLUME, SNAPSHOT)
+
+        self.driver.common._create_volume_from_snapshot.assert_called_with(
+            SNAPSHOT, VOLUME)
+
+        self.assertIsNone(result)
+
+    def test_create_cloned_volume(self):
+        self.driver.common._create_lun_from_lun = mock.Mock()
+
+        result = self.driver.create_cloned_volume(VOLUME, SRC_VOL)
+
+        self.driver.common._create_lun_from_lun.assert_called_with(
+            SRC_VOL, VOLUME)
+        self.assertIsNone(result)
+
+    def test_delete_volume(self):
+        """Volume deleted successfully."""
+        self.driver.common._delete_lun = mock.Mock()
+
+        result = self.driver.delete_volume(VOLUME)
+
+        self.driver.common._delete_lun.assert_called_with(VOLUME)
+        self.assertIsNone(result)
+
+    def test_extend_volume(self):
+        """Volume extended successfully."""
+        new_size = 10
+        self.driver.common._extend_lun = mock.Mock()
+
+        result = self.driver.extend_volume(VOLUME, new_size)
+
+        self.driver.common._extend_lun.assert_called_with(VOLUME, new_size)
+        self.assertIsNone(result)
+
+    def test_create_snapshot(self):
+        self.driver.common._create_lun_snapshot = mock.Mock()
+
+        result = self.driver.create_snapshot(SNAPSHOT)
+        self.driver.common._create_lun_snapshot.assert_called_with(SNAPSHOT)
+        self.assertIsNone(result)
+
+    def test_delete_snapshot(self):
+        self.driver.common._delete_lun_snapshot = mock.Mock()
+
+        result = self.driver.delete_snapshot(SNAPSHOT)
+        self.driver.common._delete_lun_snapshot.assert_called_with(SNAPSHOT)
+        self.assertIsNone(result)
+
+    def test_get_volume_stats(self):
+        self.driver._update_volume_stats = mock.Mock()
+        self.driver._update_volume_stats()
+
+        result = self.driver.get_volume_stats(True)
+
+        self.driver._update_volume_stats.assert_called_with()
+        self.assertEqual(self.driver.stats, result)
+
+    def test_update_volume_stats(self):
+        """Makes a mock query to the backend to collect
+           stats on all physical devices.
+        """
+        backend_name = self.conf.volume_backend_name
+        vendor_name = "Violin Memory, Inc."
+        tot_gb = 2046
+        free_gb = 1022
+
+        phy_devices = "/batch/physicalresource/physicaldevice"
+
+        conf = {
+            'basic.get.side_effect': [PHY_DEVICES_RESPONSE, ],
+        }
+
+        self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
+
+        result = self.driver._update_volume_stats()
+
+        calls = [mock.call(phy_devices)]
+        self.driver.common.vmem_mg.basic.get.assert_has_calls(calls)
+        self.assertEqual(tot_gb, self.driver.stats['total_capacity_gb'])
+        self.assertEqual(free_gb, self.driver.stats['free_capacity_gb'])
+        self.assertEqual(backend_name,
+                         self.driver.stats['volume_backend_name'])
+        self.assertEqual(vendor_name, self.driver.stats['vendor_name'])
+        self.assertIsNone(result)
+
+    def test_get_active_fc_targets(self):
+        """Makes a mock query to the backend to collect
+           all the physical adapters and extract the WWNs.
+        """
+
+        conf = {
+            'adapter.get_fc_info.return_value': FC_INFO,
+        }
+
+        self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
+
+        result = self.driver._get_active_fc_targets()
+
+        self.assertEqual(['2100001b9745e230', '2100001b9745e25f',
+                          '2100001b9745e231', '2100001b9745e25e'],
+                         result)
+
+    def test_initialize_connection(self):
+        lun_id = 1
+        target_wwns = self.driver.gateway_fc_wwns
+        init_targ_map = {}
+
+        conf = {
+            'client.create_client.return_value': None,
+        }
+        self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
+        self.driver._export_lun = mock.Mock(return_value=lun_id)
+        self.driver._build_initiator_target_map = mock.Mock(
+            return_value=(target_wwns, init_targ_map))
+
+        props = self.driver.initialize_connection(VOLUME, CONNECTOR)
+
+        self.driver.common.vmem_mg.client.create_client.assert_called_with(
+            name=CONNECTOR['host'], proto='FC', fc_wwns=CONNECTOR['wwpns'])
+        self.driver._export_lun.assert_called_with(VOLUME, CONNECTOR)
+        self.driver._build_initiator_target_map.assert_called_with(
+            CONNECTOR)
+        self.assertEqual("fibre_channel", props['driver_volume_type'])
+        self.assertEqual(True, props['data']['target_discovered'])
+        self.assertEqual(self.driver.gateway_fc_wwns,
+                         props['data']['target_wwn'])
+        self.assertEqual(lun_id, props['data']['target_lun'])
+
+    def test_terminate_connection(self):
+        target_wwns = self.driver.gateway_fc_wwns
+        init_targ_map = {}
+
+        self.driver.common.vmem_mg = self.setup_mock_concerto()
+        self.driver._unexport_lun = mock.Mock()
+        self.driver._is_initiator_connected_to_array = mock.Mock(
+            return_value=False)
+        self.driver._build_initiator_target_map = mock.Mock(
+            return_value=(target_wwns, init_targ_map))
+
+        props = self.driver.terminate_connection(VOLUME, CONNECTOR)
+
+        self.driver._unexport_lun.assert_called_with(VOLUME, CONNECTOR)
+        self.driver._is_initiator_connected_to_array.assert_called_with(
+            CONNECTOR)
+        self.driver._build_initiator_target_map.assert_called_with(
+            CONNECTOR)
+        self.assertEqual("fibre_channel", props['driver_volume_type'])
+        self.assertEqual(target_wwns, props['data']['target_wwn'])
+        self.assertEqual(init_targ_map, props['data']['initiator_target_map'])
+
+    def test_export_lun(self):
+        lun_id = '1'
+        response = {'success': True, 'msg': 'Assign SAN client successfully'}
+
+        conf = {
+            'client.get_client_info.return_value': CLIENT_INFO,
+        }
+        self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
+
+        self.driver.common._send_cmd_and_verify = mock.Mock(
+            return_value=response)
+
+        self.driver._get_lun_id = mock.Mock(return_value=lun_id)
+
+        result = self.driver._export_lun(VOLUME, CONNECTOR)
+
+        self.driver.common._send_cmd_and_verify.assert_called_with(
+            self.driver.common.vmem_mg.lun.assign_lun_to_client,
+            self.driver._is_lun_id_ready,
+            'Assign SAN client successfully',
+            [VOLUME['id'], CONNECTOR['host'], "ReadWrite"],
+            [VOLUME['id'], CONNECTOR['host']])
+        self.driver._get_lun_id.assert_called_with(
+            VOLUME['id'], CONNECTOR['host'])
+        self.assertEqual(lun_id, result)
+
+    def test_export_lun_fails_with_exception(self):
+        lun_id = '1'
+        response = {'success': False, 'msg': 'Generic error'}
+        failure = exception.ViolinBackendErr
+
+        self.driver.common.vmem_mg = self.setup_mock_concerto()
+        self.driver.common._send_cmd_and_verify = mock.Mock(
+            side_effect=exception.ViolinBackendErr(response['msg']))
+        self.driver._get_lun_id = mock.Mock(return_value=lun_id)
+
+        self.assertRaises(failure, self.driver._export_lun, VOLUME, CONNECTOR)
+
+    def test_unexport_lun(self):
+        response = {'success': True, 'msg': 'Unassign SAN client successfully'}
+
+        self.driver.common.vmem_mg = self.setup_mock_concerto()
+        self.driver.common._send_cmd = mock.Mock(
+            return_value=response)
+
+        result = self.driver._unexport_lun(VOLUME, CONNECTOR)
+
+        self.driver.common._send_cmd.assert_called_with(
+            self.driver.common.vmem_mg.lun.unassign_client_lun,
+            "Unassign SAN client successfully",
+            VOLUME['id'], CONNECTOR['host'], True)
+        self.assertIsNone(result)
+
+    def test_get_lun_id(self):
+
+        conf = {
+            'client.get_client_info.return_value': CLIENT_INFO,
+        }
+        self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
+
+        result = self.driver._get_lun_id(VOLUME['id'], CONNECTOR['host'])
+
+        self.assertEqual(8, result)
+
+    def test_is_lun_id_ready(self):
+        lun_id = '1'
+        self.driver.common.vmem_mg = self.setup_mock_concerto()
+
+        self.driver._get_lun_id = mock.Mock(return_value=lun_id)
+
+        result = self.driver._is_lun_id_ready(
+            VOLUME['id'], CONNECTOR['host'])
+        self.assertTrue(result)
+
+    def test_build_initiator_target_map(self):
+        """Successfully build a map when zoning is enabled."""
+        expected_targ_wwns = FC_TARGET_WWPNS
+
+        self.driver.lookup_service = mock.Mock()
+        (self.driver.lookup_service.get_device_mapping_from_network.
+         return_value) = FC_FABRIC_MAP
+
+        result = self.driver._build_initiator_target_map(CONNECTOR)
+        (targ_wwns, init_targ_map) = result
+
+        (self.driver.lookup_service.get_device_mapping_from_network.
+         assert_called_with(CONNECTOR['wwpns'], self.driver.gateway_fc_wwns))
+        self.assertEqual(set(expected_targ_wwns), set(targ_wwns))
+
+        i = FC_INITIATOR_WWPNS[0]
+        self.assertIn(FC_TARGET_WWPNS[0], init_targ_map[i])
+        self.assertIn(FC_TARGET_WWPNS[1], init_targ_map[i])
+        self.assertEqual(2, len(init_targ_map[i]))
+
+        i = FC_INITIATOR_WWPNS[1]
+        self.assertIn(FC_TARGET_WWPNS[2], init_targ_map[i])
+        self.assertIn(FC_TARGET_WWPNS[3], init_targ_map[i])
+        self.assertEqual(2, len(init_targ_map[i]))
+
+        self.assertEqual(2, len(init_targ_map))
+
+    def test_build_initiator_target_map_no_lookup_service(self):
+        """Successfully build a map when zoning is disabled."""
+        expected_targ_wwns = FC_TARGET_WWPNS
+        expected_init_targ_map = {
+            CONNECTOR['wwpns'][0]: FC_TARGET_WWPNS,
+            CONNECTOR['wwpns'][1]: FC_TARGET_WWPNS
+        }
+        self.driver.lookup_service = None
+
+        targ_wwns, init_targ_map = self.driver._build_initiator_target_map(
+            CONNECTOR)
+
+        self.assertEqual(expected_targ_wwns, targ_wwns)
+        self.assertEqual(expected_init_targ_map, init_targ_map)
+
+    def test_is_initiator_connected_to_array(self):
+        """Successfully finds an initiator with remaining active session."""
+        conf = {
+            'client.get_client_info.return_value': CLIENT_INFO,
+        }
+        self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
+
+        self.assertTrue(self.driver._is_initiator_connected_to_array(
+            CONNECTOR))
+        self.driver.common.vmem_mg.client.get_client_info.assert_called_with(
+            CONNECTOR['host'])
+
+    def test_is_initiator_connected_to_array_empty_response(self):
+        """Successfully finds no initiators with remaining active sessions."""
+        conf = {
+            'client.get_client_info.return_value': CLIENT_INFO1
+        }
+        self.driver.common.vmem_mg = self.setup_mock_concerto(m_conf=conf)
+
+        self.assertFalse(self.driver._is_initiator_connected_to_array(
+            CONNECTOR))
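
As test_build_initiator_target_map_no_lookup_service documents, when no FC
SAN lookup service is available the driver falls back to a flat mapping in
which every initiator WWPN is paired with the full list of array target
ports. A small sketch of that fallback using the fixtures above (the
standalone function name is illustrative, not the driver's actual method):

    def build_initiator_target_map(connector_wwpns, gateway_fc_wwns):
        # Without zoning information, assume full visibility: map each
        # initiator to the complete list of array target ports.
        init_targ_map = dict(
            (wwpn, list(gateway_fc_wwns)) for wwpn in connector_wwpns)
        return list(gateway_fc_wwns), init_targ_map

    # Example with CONNECTOR['wwpns'] and FC_TARGET_WWPNS from this file:
    targets, mapping = build_initiator_target_map(
        ['50014380186b3f65', '50014380186b3f67'],
        ['31000024ff45fb22', '21000024ff45fb23',
         '51000024ff45f1be', '41000024ff45f1bf'])
    assert mapping['50014380186b3f65'] == targets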
diff --git a/cinder/volume/drivers/violin/v7000_common.py b/cinder/volume/drivers/violin/v7000_common.py
new file mode 100644 (file)
index 0000000..fbc01e6
--- /dev/null
@@ -0,0 +1,854 @@
+# Copyright 2015 Violin Memory, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+Violin Memory 7000 Series All-Flash Array Common Driver for OpenStack Cinder
+
+Provides common (i.e., non-protocol-specific) management functions for
+V7000 series flash arrays.
+
+Backend array communication is handled via VMEM's python library
+called 'vmemclient'.
+
+NOTE: this driver file requires the use of synchronization points for
+certain types of backend operations, and as a result may not work
+properly in an active-active HA configuration.  See OpenStack Cinder
+driver documentation for more information.
+"""
+
+import math
+import re
+import time
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import units
+import six
+
+from cinder import context
+from cinder.db.sqlalchemy import api
+from cinder import exception
+from cinder.i18n import _, _LE, _LI
+from cinder.openstack.common import loopingcall
+from cinder import utils
+from cinder.volume import volume_types
+
+
+LOG = logging.getLogger(__name__)
+
+try:
+    import vmemclient
+except ImportError:
+    vmemclient = None
+else:
+    LOG.info(_LI("Running with vmemclient version: %s"),
+             vmemclient.__version__)
+
+
+CONCERTO_SUPPORTED_VERSION_PATTERNS = ['Version 7.[0-9].?[0-9]?']
+CONCERTO_DEFAULT_PRIORITY = 'medium'
+CONCERTO_DEFAULT_SRA_POLICY = 'preserveAll'
+CONCERTO_DEFAULT_SRA_ENABLE_EXPANSION = True
+CONCERTO_DEFAULT_SRA_EXPANSION_THRESHOLD = 50
+CONCERTO_DEFAULT_SRA_EXPANSION_INCREMENT = '1024MB'
+CONCERTO_DEFAULT_SRA_EXPANSION_MAX_SIZE = None
+CONCERTO_DEFAULT_SRA_ENABLE_SHRINK = False
+CONCERTO_DEFAULT_POLICY_MAX_SNAPSHOTS = 1000
+CONCERTO_DEFAULT_POLICY_RETENTION_MODE = 'All'
+
+
+violin_opts = [
+    cfg.IntOpt('violin_request_timeout',
+               default=300,
+               help='Global backend request timeout, in seconds.'),
+]
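+
+# Example cinder.conf override (optional; the default is 300 seconds):
+#   violin_request_timeout=600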
+
+CONF = cfg.CONF
+CONF.register_opts(violin_opts)
+
+
+class V7000Common(object):
+    """Contains common code for the Violin V7000 drivers."""
+
+    def __init__(self, config):
+        self.vmem_mg = None
+        self.container = ""
+        self.config = config
+
+    def do_setup(self, context):
+        """Any initialization the driver does while starting."""
+        if not self.config.san_ip:
+            raise exception.InvalidInput(
+                reason=_('Gateway VIP is not set'))
+
+        self.vmem_mg = vmemclient.open(self.config.san_ip,
+                                       self.config.san_login,
+                                       self.config.san_password,
+                                       keepalive=True)
+
+        if self.vmem_mg is None:
+            msg = _('Failed to connect to array')
+            raise exception.VolumeBackendAPIException(data=msg)
+
+    def check_for_setup_error(self):
+        """Returns an error if prerequisites aren't met."""
+        if vmemclient is None:
+            msg = _('vmemclient python library not found')
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        LOG.info(_LI("CONCERTO version: %s"), self.vmem_mg.version)
+
+        if not self._is_supported_vmos_version(self.vmem_mg.version):
+            msg = _('CONCERTO version is not supported')
+            raise exception.ViolinInvalidBackendConfig(reason=msg)
+
+    @utils.synchronized('vmem-lun')
+    def _create_lun(self, volume):
+        """Creates a new lun.
+
+        :param volume:  volume object provided by the Manager
+        """
+        thin_lun = False
+        dedup = False
+        size_mb = volume['size'] * units.Ki
+        full_size_mb = size_mb
+        pool = None
+
+        LOG.debug("Creating LUN %(name)s, %(size)s MB.",
+                  {'name': volume['name'], 'size': size_mb})
+
+        if self.config.san_thin_provision:
+            thin_lun = True
+            # Set the actual allocation size for thin lun
+            # default here is 10%
+            size_mb = size_mb / 10
+
+        typeid = volume['volume_type_id']
+        if typeid:
+            # extra_specs with thin specified overrides san_thin_provision
+            spec_value = self._get_volume_type_extra_spec(volume, "thin")
+            if spec_value and spec_value.lower() == "true":
+                thin_lun = True
+                # Set the actual allocation size for thin lun
+                # default here is 10%
+                size_mb = size_mb / 10
+
+            spec_value = self._get_volume_type_extra_spec(volume, "dedup")
+            if spec_value and spec_value.lower() == "true":
+                dedup = True
+                # A dedup lun is always a thin lun
+                thin_lun = True
+                # Set the actual allocation size for thin lun
+                # default here is 10%. The actual allocation may
+                # differ, depending on other factors.
+                size_mb = full_size_mb / 10
+
+            # Extract the storage_pool name if one is specified
+            pool = self._get_violin_extra_spec(volume, "storage_pool")
+
+        try:
+            # Note: In the following create_lun command for setting up a dedup
+            # or thin lun, the size_mb parameter is ignored and 10% of the
+            # full_size_mb specified is the size actually allocated to
+            # the lun. full_size_mb is the size to which the lun is
+            # allowed to grow. On the other hand, if it is a thick lun,
+            # full_size_mb is ignored and size_mb is the actual
+            # allocated size of the lun.
+
+            self._send_cmd(self.vmem_mg.lun.create_lun,
+                           "Create resource successfully.",
+                           volume['id'], size_mb, dedup,
+                           thin_lun, full_size_mb, storage_pool=pool)
+
+        except Exception:
+            LOG.exception(_LE("Lun create for %s failed!"), volume['id'])
+            raise
+
+    @utils.synchronized('vmem-lun')
+    def _delete_lun(self, volume):
+        """Deletes a lun.
+
+        :param volume:  volume object provided by the Manager
+        """
+        success_msgs = ['Delete resource successfully', '']
+
+        LOG.debug("Deleting lun %s.", volume['id'])
+
+        try:
+            # If the LUN has ever had a snapshot, it has an SRA and
+            # policy that must be deleted first.
+            self._delete_lun_snapshot_bookkeeping(volume['id'])
+
+            # TODO(rdl) force the delete for now to deal with pending
+            # snapshot issues.  Should revisit later for a better fix.
+            self._send_cmd(self.vmem_mg.lun.delete_lun,
+                           success_msgs, volume['id'], True)
+
+        except exception.VolumeBackendAPIException:
+            LOG.exception(_LE("Lun %s has dependent snapshots, "
+                              "skipping lun deletion."), volume['id'])
+            raise exception.VolumeIsBusy(volume_name=volume['id'])
+
+        except Exception:
+            LOG.exception(_LE("Lun delete for %s failed!"), volume['id'])
+            raise
+
+    def _extend_lun(self, volume, new_size):
+        """Extend an existing volume's size.
+
+        :param volume:  volume object provided by the Manager
+        :param new_size:  new size in GB to be applied
+        """
+        v = self.vmem_mg
+
+        typeid = volume['volume_type_id']
+        if typeid:
+            spec_value = self._get_volume_type_extra_spec(volume, "dedup")
+            if spec_value and spec_value.lower() == "true":
+                # A Dedup lun's size cannot be modified in Concerto.
+                msg = _('Dedup luns cannot be extended')
+                raise exception.VolumeDriverException(message=msg)
+
+        size_mb = volume['size'] * units.Ki
+        new_size_mb = new_size * units.Ki
+
+        # Concerto lun extend requires number of MB to increase size by,
+        # not the final size value.
+        #
+        delta_mb = new_size_mb - size_mb
+
+        LOG.debug("Extending lun %(id)s, from %(size)s to %(new_size)s MB.",
+                  {'id': volume['id'], 'size': size_mb,
+                   'new_size': new_size_mb})
+
+        try:
+            self._send_cmd(v.lun.extend_lun,
+                           "Expand resource successfully",
+                           volume['id'], delta_mb)
+
+        except Exception:
+            LOG.exception(_LE("LUN extend failed!"))
+            raise
+
+    def _create_lun_snapshot(self, snapshot):
+        """Create a new cinder snapshot on a volume.
+
+        This maps onto a Concerto 'timemark', but we must always first
+        ensure that a snapshot resource area (SRA) exists, and that a
+        snapshot policy exists.
+
+        :param snapshot:  cinder snapshot object provided by the Manager
+
+        Exceptions:
+            VolumeBackendAPIException: If SRA could not be created, or
+                snapshot policy could not be created
+            ViolinRequestRetryTimeout: If the backend could not complete
+                the request within the allotted timeout.
+            ViolinBackendErr: If backend reports an error during the
+                create snapshot phase.
+        """
+
+        cinder_volume_id = snapshot['volume_id']
+        cinder_snapshot_id = snapshot['id']
+
+        LOG.debug("Creating LUN snapshot %(snap_id)s on volume "
+                  "%(vol_id)s %(dpy_name)s.",
+                  {'snap_id': cinder_snapshot_id,
+                   'vol_id': cinder_volume_id,
+                   'dpy_name': snapshot['display_name']})
+
+        self._ensure_snapshot_resource_area(cinder_volume_id)
+
+        self._ensure_snapshot_policy(cinder_volume_id)
+
+        try:
+            self._send_cmd(
+                self.vmem_mg.snapshot.create_lun_snapshot,
+                "Create TimeMark successfully",
+                lun=cinder_volume_id,
+                comment=self._compress_snapshot_id(cinder_snapshot_id),
+                priority=CONCERTO_DEFAULT_PRIORITY,
+                enable_notification=False)
+        except Exception:
+            LOG.exception(_LE("Lun create snapshot for "
+                              "volume %(vol)s snapshot %(snap)s failed!"),
+                          {'vol': cinder_volume_id,
+                           'snap': cinder_snapshot_id})
+            raise
+
+    def _delete_lun_snapshot(self, snapshot):
+        """Delete the specified cinder snapshot.
+
+        :param snapshot:  cinder snapshot object provided by the Manager
+
+        Exceptions:
+            ViolinRequestRetryTimeout: If the backend could not complete
+                the request within the allotted timeout.
+            ViolinBackendErr: If backend reports an error during the
+                delete snapshot phase.
+        """
+        cinder_volume_id = snapshot['volume_id']
+        cinder_snapshot_id = snapshot['id']
+        LOG.debug("Deleting snapshot %(snap_id)s on volume "
+                  "%(vol_id)s %(dpy_name)s",
+                  {'snap_id': cinder_snapshot_id,
+                   'vol_id': cinder_volume_id,
+                   'dpy_name': snapshot['display_name']})
+
+        try:
+            self._send_cmd(
+                self.vmem_mg.snapshot.delete_lun_snapshot,
+                "Delete TimeMark successfully",
+                lun=cinder_volume_id,
+                comment=self._compress_snapshot_id(cinder_snapshot_id))
+
+        except Exception:
+            LOG.exception(_LE("Lun delete snapshot for "
+                              "volume %(vol)s snapshot %(snap)s failed!"),
+                          {'vol': cinder_volume_id,
+                           'snap': cinder_snapshot_id})
+            raise
+
+    def _create_volume_from_snapshot(self, snapshot, volume):
+        """Create a new cinder volume from a given snapshot of a lun
+
+        This maps onto a Concerto 'copy  snapshot to lun'. Concerto
+        creates the lun and then copies the snapshot into it.
+
+        :param snapshot:  cinder snapshot object provided by the Manager
+        :param volume:  cinder volume to be created
+        """
+
+        cinder_volume_id = volume['id']
+        cinder_snapshot_id = snapshot['id']
+        pool = None
+        result = None
+
+        LOG.debug("Copying snapshot %(snap_id)s onto volume %(vol_id)s.",
+                  {'snap_id': cinder_snapshot_id,
+                   'vol_id': cinder_volume_id})
+
+        typeid = volume['volume_type_id']
+        if typeid:
+            pool = self._get_violin_extra_spec(volume, "storage_pool")
+
+        try:
+            result = self.vmem_mg.lun.copy_snapshot_to_new_lun(
+                source_lun=snapshot['volume_id'],
+                source_snapshot_comment=self._compress_snapshot_id(
+                    cinder_snapshot_id),
+                destination=cinder_volume_id,
+                storage_pool=pool)
+
+            if not result['success']:
+                self._check_error_code(result)
+
+        except Exception:
+            LOG.exception(_LE("Copy snapshot to volume for "
+                              "snapshot %(snap)s volume %(vol)s failed!"),
+                          {'snap': cinder_snapshot_id,
+                           'vol': cinder_volume_id})
+            raise
+
+        # get the destination lun info and extract virtualDeviceID
+        info = self.vmem_mg.lun.get_lun_info(object_id=result['object_id'])
+
+        self._wait_for_lun_or_snap_copy(
+            snapshot['volume_id'], dest_vdev_id=info['virtualDeviceID'])
+
+    def _create_lun_from_lun(self, src_vol, dest_vol):
+        """Copy the contents of a lun to a new lun (i.e., full clone).
+
+        :param src_vol:  cinder volume to clone
+        :param dest_vol:  cinder volume to be created
+        """
+        pool = None
+        result = None
+
+        LOG.debug("Copying lun %(src_vol_id)s onto lun %(dest_vol_id)s.",
+                  {'src_vol_id': src_vol['id'],
+                   'dest_vol_id': dest_vol['id']})
+
+        # Extract the storage_pool name if one is specified
+        typeid = dest_vol['volume_type_id']
+        if typeid:
+            pool = self._get_violin_extra_spec(dest_vol, "storage_pool")
+
+        try:
+            # in order to do a full clone the source lun must have a
+            # snapshot resource
+            self._ensure_snapshot_resource_area(src_vol['id'])
+
+            result = self.vmem_mg.lun.copy_lun_to_new_lun(
+                source=src_vol['id'], destination=dest_vol['id'],
+                storage_pool=pool)
+
+            if not result['success']:
+                self._check_error_code(result)
+
+        except Exception:
+            LOG.exception(_LE("Create new lun from lun for source "
+                              "%(src)s => destination %(dest)s failed!"),
+                          {'src': src_vol['id'], 'dest': dest_vol['id']})
+            raise
+
+        self._wait_for_lun_or_snap_copy(
+            src_vol['id'], dest_obj_id=result['object_id'])
+
+    def _send_cmd(self, request_func, success_msgs, *args, **kwargs):
+        """Run an XG request function, and retry as needed.
+
+        The request will be retried until it returns a success
+        message, a failure message, or the global request timeout is
+        hit.
+
+        This wrapper is meant to deal with backend requests that can
+        fail for a variety of reasons, for instance, when the system
+        is already busy handling other LUN requests.  It gives up if
+        there is no space left, or other "fatal" errors are returned
+        (see _check_error_code() for a list of all known error
+        conditions).
+
+        :param request_func:  XG api method to call
+        :param success_msgs:  Success messages expected from the backend
+        :param *args:  argument array to be passed to the request_func
+        :param **kwargs:  argument dictionary to be passed to request_func
+        :returns: the response dict from the last XG call
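+
+        A minimal usage sketch, mirroring the extend_lun call made
+        elsewhere in this driver:
+
+            self._send_cmd(self.vmem_mg.lun.extend_lun,
+                           "Expand resource successfully",
+                           volume['id'], delta_mb)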
+        """
+        resp = {}
+        start = time.time()
+        done = False
+
+        if isinstance(success_msgs, six.string_types):
+            success_msgs = [success_msgs]
+
+        while not done:
+            if time.time() - start >= self.config.violin_request_timeout:
+                raise exception.ViolinRequestRetryTimeout(
+                    timeout=self.config.violin_request_timeout)
+
+            resp = request_func(*args, **kwargs)
+
+            if not resp['msg']:
+                # XG requests will return None for a message if no message
+                # string is passed in the raw response
+                resp['msg'] = ''
+
+            for msg in success_msgs:
+                if resp['success'] and msg in resp['msg']:
+                    done = True
+                    break
+
+            if not resp['success']:
+                self._check_error_code(resp)
+                done = True
+                break
+
+        return resp
+
+    def _send_cmd_and_verify(self, request_func, verify_func,
+                             request_success_msgs='', rargs=None, vargs=None):
+        """Run an XG request function, retry if needed, and verify success.
+
+        If the verification fails, then retry the request/verify cycle
+        until both functions are successful, the request function
+        returns a failure message, or the global request timeout is
+        hit.
+
+        This wrapper is meant to deal with backend requests that can
+        fail for a variety of reasons, for instance, when the system
+        is already busy handling other LUN requests.  It is also smart
+        enough to give up if clustering is down (e.g., no HA available),
+        there is no space left, or other "fatal" errors are returned
+        (see _check_error_code() for a list of all known error
+        conditions).
+
+        :param request_func:  XG api method to call
+        :param verify_func:  function call to verify request was completed
+        :param request_success_msgs:  Success message(s) expected for
+            request_func
+        :param rargs:  argument list to be passed to request_func
+        :param vargs:  argument list to be passed to verify_func
+        :returns: the response dict from the last XG call
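+
+        A minimal usage sketch, mirroring the lun assignment call in
+        the FCP driver:
+
+            self._send_cmd_and_verify(
+                v.lun.assign_lun_to_client,
+                self._is_lun_id_ready,
+                "Assign SAN client successfully",
+                [volume['id'], connector['host'], "ReadWrite"],
+                [volume['id'], connector['host']])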
+        """
+        resp = {}
+        start = time.time()
+        request_needed = True
+        verify_needed = True
+
+        if isinstance(request_success_msgs, six.string_types):
+            request_success_msgs = [request_success_msgs]
+
+        rargs = rargs if rargs else []
+        vargs = vargs if vargs else []
+
+        while request_needed or verify_needed:
+            if time.time() - start >= self.config.violin_request_timeout:
+                raise exception.ViolinRequestRetryTimeout(
+                    timeout=self.config.violin_request_timeout)
+
+            if request_needed:
+                resp = request_func(*rargs)
+
+                if not resp['msg']:
+                    # XG requests will return None for a message if no message
+                    # string is passed in the raw response
+                    resp['msg'] = ''
+
+                for msg in request_success_msgs:
+                    if resp['success'] and msg in resp['msg']:
+                        request_needed = False
+                        break
+
+                if not resp['success']:
+                    self._check_error_code(resp)
+                    request_needed = False
+
+            elif verify_needed:
+                success = verify_func(*vargs)
+                if success:
+                    # XG verify func was completed
+                    verify_needed = False
+
+        return resp
+
+    def _ensure_snapshot_resource_area(self, volume_id):
+        """Make sure concerto snapshot resource area exists on volume.
+
+        :param volume_id:  Cinder volume ID corresponding to the backend LUN
+
+        Exceptions:
+            VolumeBackendAPIException: if the cinder volume does not exist
+               on the backend, or the SRA could not be created.
+        """
+
+        ctxt = context.get_admin_context()
+        volume = api.volume_get(ctxt, volume_id)
+        pool = None
+        if not volume:
+            msg = (_("Failed to ensure snapshot resource area, could not "
+                   "locate volume for id %s") % volume_id)
+            raise exception.VolumeBackendAPIException(data=msg)
+
+        if not self.vmem_mg.snapshot.lun_has_a_snapshot_resource(
+           lun=volume_id):
+            # Per Concerto documentation, the SRA size should be computed
+            # as follows
+            #  Size-of-original-LUN        Reserve for SRA
+            #   < 500MB                    100%
+            #   500MB to 2G                50%
+            #   >= 2G                      20%
+            # Note: cinder volume.size is in GB, vmemclient wants MB.
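+            # Example: a 10 GB (10240 MB) lun falls in the >= 2G tier,
+            # so its SRA reserve is ceil(0.2 * 10240) = 2048 MB.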
+            lun_size_mb = volume['size'] * units.Ki
+            if lun_size_mb < 500:
+                snap_size_mb = lun_size_mb
+            elif lun_size_mb < 2000:
+                snap_size_mb = 0.5 * lun_size_mb
+            else:
+                snap_size_mb = 0.2 * lun_size_mb
+
+            snap_size_mb = int(math.ceil(snap_size_mb))
+            typeid = volume['volume_type_id']
+            if typeid:
+                pool = self._get_violin_extra_spec(volume, "storage_pool")
+
+            LOG.debug("Creating SRA of %(ssmb)sMB for lun of %(lsmb)sMB "
+                      "on %(vol_id)s.",
+                      {'ssmb': snap_size_mb,
+                       'lsmb': lun_size_mb,
+                       'vol_id': volume_id})
+
+            res = self.vmem_mg.snapshot.create_snapshot_resource(
+                lun=volume_id,
+                size=snap_size_mb,
+                enable_notification=False,
+                policy=CONCERTO_DEFAULT_SRA_POLICY,
+                enable_expansion=CONCERTO_DEFAULT_SRA_ENABLE_EXPANSION,
+                expansion_threshold=CONCERTO_DEFAULT_SRA_EXPANSION_THRESHOLD,
+                expansion_increment=CONCERTO_DEFAULT_SRA_EXPANSION_INCREMENT,
+                expansion_max_size=CONCERTO_DEFAULT_SRA_EXPANSION_MAX_SIZE,
+                enable_shrink=CONCERTO_DEFAULT_SRA_ENABLE_SHRINK,
+                storage_pool=pool)
+
+            if (not res['success']):
+                msg = (_("Failed to create snapshot resource area on "
+                       "volume %(vol)s: %(res)s.") %
+                       {'vol': volume_id, 'res': res['msg']})
+                raise exception.VolumeBackendAPIException(data=msg)
+
+    def _ensure_snapshot_policy(self, volume_id):
+        """Ensure concerto snapshot policy exists on cinder volume.
+
+        A snapshot policy is required by concerto in order to create snapshots.
+
+        :param volume_id:  Cinder volume ID corresponding to the backend LUN
+
+        Exceptions:
+            VolumeBackendAPIException: when snapshot policy cannot be created.
+        """
+
+        if not self.vmem_mg.snapshot.lun_has_a_snapshot_policy(
+                lun=volume_id):
+
+            res = self.vmem_mg.snapshot.create_snapshot_policy(
+                lun=volume_id,
+                max_snapshots=CONCERTO_DEFAULT_POLICY_MAX_SNAPSHOTS,
+                enable_replication=False,
+                enable_snapshot_schedule=False,
+                enable_cdp=False,
+                retention_mode=CONCERTO_DEFAULT_POLICY_RETENTION_MODE)
+
+            if not res['success']:
+                msg = (_(
+                    "Failed to create snapshot policy on "
+                    "volume %(vol)s: %(res)s.") %
+                    {'vol': volume_id, 'res': res['msg']})
+                raise exception.VolumeBackendAPIException(data=msg)
+
+    def _delete_lun_snapshot_bookkeeping(self, volume_id):
+        """Clear residual snapshot support resources from LUN.
+
+        Exceptions:
+            VolumeBackendAPIException: If snapshots still exist on the LUN.
+        """
+
+        # Make absolutely sure there are no snapshots present
+        try:
+            snaps = self.vmem_mg.snapshot.get_snapshots(volume_id)
+            if len(snaps) > 0:
+                msg = (_("Cannot delete LUN %s while snapshots exist.") %
+                       volume_id)
+                raise exception.VolumeBackendAPIException(data=msg)
+        except vmemclient.core.error.NoMatchingObjectIdError:
+            pass
+        except vmemclient.core.error.MissingParameterError:
+            pass
+
+        try:
+            res = self.vmem_mg.snapshot.delete_snapshot_policy(
+                lun=volume_id)
+            if not res['success']:
+                if 'TimeMark is disabled' in res['msg']:
+                    LOG.debug("Verified no snapshot policy is on volume %s.",
+                              volume_id)
+                else:
+                    msg = (_("Unable to delete snapshot policy on "
+                             "volume %s.") % volume_id)
+                    raise exception.VolumeBackendAPIException(data=msg)
+            else:
+                LOG.debug("Deleted snapshot policy on volume "
+                          "%(vol)s, result %(res)s.",
+                          {'vol': volume_id, 'res': res})
+        except vmemclient.core.error.NoMatchingObjectIdError:
+            LOG.debug("Verified no snapshot policy present on volume %s.",
+                      volume_id)
+
+        try:
+            res = self.vmem_mg.snapshot.delete_snapshot_resource(
+                lun=volume_id)
+            LOG.debug("Deleted snapshot resource area on "
+                      "volume %(vol)s, result %(res)s.",
+                      {'vol': volume_id, 'res': res})
+        except vmemclient.core.error.NoMatchingObjectIdError:
+            LOG.debug("Verified no snapshot resource area present on "
+                      "volume %s.", volume_id)
+
+    def _compress_snapshot_id(self, cinder_snap_id):
+        """Compress cinder snapshot ID so it fits in backend.
+
+           Compresses the ID to fit in 32 characters.
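+
+           Example: 'abcdabcd-1234-abcd-1234-abcdeffedcbb' becomes
+           'abcdabcd1234abcd1234abcdeffedcbb' (32 chars).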
+        """
+        return ''.join(six.text_type(cinder_snap_id).split('-'))
+
+    def _get_snapshot_from_lun_snapshots(
+            self, cinder_volume_id, cinder_snap_id):
+        """Locate backend snapshot dict associated with cinder snapshot id.
+
+        :returns: Cinder snapshot dictionary if found, None otherwise.
+        """
+
+        try:
+            snaps = self.vmem_mg.snapshot.get_snapshots(cinder_volume_id)
+        except vmemclient.core.error.NoMatchingObjectIdError:
+            return None
+
+        key = self._compress_snapshot_id(cinder_snap_id)
+
+        for s in snaps:
+            if s['comment'] == key:
+                # Remap return dict to its uncompressed form
+                s['comment'] = cinder_snap_id
+                return s
+
+    def _wait_for_lun_or_snap_copy(self, src_vol_id, dest_vdev_id=None,
+                                   dest_obj_id=None):
+        """Poll to see when a lun or snap copy to a lun is complete.
+
+        :param src_vol_id:  cinder volume ID of source volume
+        :param dest_vdev_id:  virtual device ID of destination, for snap copy
+        :param dest_obj_id:  lun object ID of destination, for lun copy
+        :returns: True if successful, False otherwise
+        """
+        wait_id = None
+        wait_func = None
+
+        if dest_vdev_id:
+            wait_id = dest_vdev_id
+            wait_func = self.vmem_mg.snapshot.get_snapshot_copy_status
+        elif dest_obj_id:
+            wait_id = dest_obj_id
+            wait_func = self.vmem_mg.lun.get_lun_copy_status
+        else:
+            return False
+
+        def _loop_func():
+            LOG.debug("Entering _wait_for_lun_or_snap_copy loop: "
+                      "vdev=%s, objid=%s", dest_vdev_id, dest_obj_id)
+
+            status = wait_func(src_vol_id)
+
+            if status[0] is None:
+                # pre-copy transient result, status=(None, None, 0)
+                LOG.debug("lun or snap copy prepping.")
+            elif status[0] != wait_id:
+                # the copy must be complete since another lun is being copied
+                LOG.debug("lun or snap copy complete.")
+                raise loopingcall.LoopingCallDone(retvalue=True)
+            elif status[1] is not None:
+                # copy is in progress, status = ('12345', 1700, 10)
+                LOG.debug("MB copied:%d, percent done: %d.",
+                          status[1], status[2])
+            elif status[2] == 0:
+                # copy has just started, status = ('12345', None, 0)
+                LOG.debug("lun or snap copy started.")
+            elif status[2] == 100:
+                # copy is complete, status = ('12345', None, 100)
+                LOG.debug("lun or snap copy complete.")
+                raise loopingcall.LoopingCallDone(retvalue=True)
+            else:
+                # unexpected case
+                LOG.debug("unexpected case (%{id}s, %{bytes}s, %{percent}s)",
+                          {'id': six.text_type(status[0]),
+                           'bytes': six.text_type(status[1]),
+                           'percent': six.text_type(status[2])})
+                raise loopingcall.LoopingCallDone(retvalue=False)
+
+        timer = loopingcall.FixedIntervalLoopingCall(_loop_func)
+        success = timer.start(interval=1).wait()
+
+        return success
+
+    def _is_supported_vmos_version(self, version_string):
+        """Check a version string for compatibility with OpenStack.
+
+        Compare a version string against the global regex of versions
+        compatible with OpenStack.
+
+        :param version_string:  array's gateway version string
+        :returns: True if supported, False otherwise
+        """
+        for pattern in CONCERTO_SUPPORTED_VERSION_PATTERNS:
+            if re.match(pattern, version_string):
+                return True
+        return False
+
+    def _check_error_code(self, response):
+        """Raise an exception when backend returns certain errors.
+
+        Error codes returned from the backend have to be examined
+        individually. Not all of them are fatal. For example, a lun attach
+        failing because the client is already attached is not a fatal error.
+
+        :param response:  a response dict result from the vmemclient request
+        """
+        if "Error: 0x9001003c" in response['msg']:
+            # This error indicates a duplicate attempt to attach lun,
+            # non-fatal error
+            pass
+        elif "Error: 0x9002002b" in response['msg']:
+            # lun unexport failed - lun is not exported to any clients,
+            # non-fatal error
+            pass
+        elif "Error: 0x09010023" in response['msg']:
+            # lun delete failed - dependent snapshot copy in progress,
+            # fatal error
+            raise exception.ViolinBackendErr(message=response['msg'])
+        elif "Error: 0x09010048" in response['msg']:
+            # lun delete failed - dependent snapshots still exist,
+            # fatal error
+            raise exception.ViolinBackendErr(message=response['msg'])
+        elif "Error: 0x90010022" in response['msg']:
+            # lun create failed - lun with same name already exists,
+            # fatal error
+            raise exception.ViolinBackendErrExists()
+        elif "Error: 0x90010089" in response['msg']:
+            # lun export failed - lun is still being created as copy,
+            # fatal error
+            raise exception.ViolinBackendErr(message=response['msg'])
+        else:
+            # assume any other error is fatal
+            raise exception.ViolinBackendErr(message=response['msg'])
+
+    def _get_volume_type_extra_spec(self, volume, spec_key):
+        """Parse data stored in a volume_type's extra_specs table.
+
+        :param volume:  volume object containing volume_type to query
+        :param spec_key:  the metadata key to search for
+        :returns: string value associated with spec_key
+        """
+        spec_value = None
+        ctxt = context.get_admin_context()
+        typeid = volume['volume_type_id']
+        if typeid:
+            volume_type = volume_types.get_volume_type(ctxt, typeid)
+            volume_specs = volume_type.get('extra_specs')
+            for key, val in volume_specs.items():
+
+                # Strip the prefix "capabilities"
+                if ':' in key:
+                    scope = key.split(':')
+                    key = scope[1]
+                if key == spec_key:
+                    spec_value = val
+                    break
+
+        return spec_value
+
+    def _get_violin_extra_spec(self, volume, spec_key):
+        """Parse volume_type's extra_specs table for a violin-specific key.
+
+        :param volume:  volume object containing volume_type to query
+        :param spec_key:  the metadata key to search for
+        :returns: string value associated with spec_key
+        """
+        spec_value = None
+        ctxt = context.get_admin_context()
+        typeid = volume['volume_type_id']
+        if typeid:
+            volume_type = volume_types.get_volume_type(ctxt, typeid)
+            volume_specs = volume_type.get('extra_specs')
+            for key, val in volume_specs.items():
+
+                # Strip the prefix "violin"
+                if ':' in key:
+                    scope = key.split(':')
+                    key = scope[1]
+                    if scope[0] == "violin" and key == spec_key:
+                        spec_value = val
+                        break
+        return spec_value
diff --git a/cinder/volume/drivers/violin/v7000_fcp.py b/cinder/volume/drivers/violin/v7000_fcp.py
new file mode 100644 (file)
index 0000000..30d7d2c
--- /dev/null
@@ -0,0 +1,384 @@
+# Copyright 2015 Violin Memory, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+Violin 7000 Series All-Flash Array Volume Driver
+
+Provides fibre channel specific LUN services for V7000 series flash
+arrays.
+
+This driver requires Concerto v7.0.0 or newer software on the array.
+
+You will need to install the Violin Memory REST client library:
+sudo pip install vmemclient
+
+Set the following in the cinder.conf file to enable the VMEM V7000
+Fibre Channel Driver along with the required flags:
+
+volume_driver=cinder.volume.drivers.violin.v7000_fcp.V7000FCPDriver
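+
+The driver also reads the standard SAN flags for array credentials; a
+minimal sketch with illustrative values:
+
+san_ip=<array management VIP>
+san_login=admin
+san_password=password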
+
+NOTE: this driver file requires the use of synchronization points for
+certain types of backend operations, and as a result may not work
+properly in an active-active HA configuration.  See OpenStack Cinder
+driver documentation for more information.
+"""
+
+import socket
+
+from oslo_log import log as logging
+
+from cinder import exception
+from cinder.i18n import _, _LE, _LI
+from cinder import utils
+from cinder.volume import driver
+from cinder.volume.drivers.san import san
+from cinder.volume.drivers.violin import v7000_common
+from cinder.zonemanager import utils as fczm_utils
+
+
+LOG = logging.getLogger(__name__)
+
+
+class V7000FCPDriver(driver.FibreChannelDriver):
+    """Executes commands relating to fibre channel based Violin Memory arrays.
+
+    Version history:
+        1.0 - Initial driver
+    """
+
+    VERSION = '1.0'
+
+    def __init__(self, *args, **kwargs):
+        super(V7000FCPDriver, self).__init__(*args, **kwargs)
+        self.gateway_fc_wwns = []
+        self.stats = {}
+        self.configuration.append_config_values(v7000_common.violin_opts)
+        self.configuration.append_config_values(san.san_opts)
+        self.common = v7000_common.V7000Common(self.configuration)
+        self.lookup_service = fczm_utils.create_lookup_service()
+
+        LOG.info(_LI("Initialized driver %(name)s version: %(vers)s"),
+                 {'name': self.__class__.__name__, 'vers': self.VERSION})
+
+    def do_setup(self, context):
+        """Any initialization the driver does while starting."""
+        super(V7000FCPDriver, self).do_setup(context)
+
+        self.common.do_setup(context)
+        self.gateway_fc_wwns = self._get_active_fc_targets()
+
+        # Register the client with the storage array
+        fc_version = self.VERSION + "-FCP"
+        self.common.vmem_mg.utility.set_managed_by_openstack_version(
+            fc_version)
+
+    def check_for_setup_error(self):
+        """Returns an error if prerequisites aren't met."""
+        self.common.check_for_setup_error()
+        if len(self.gateway_fc_wwns) == 0:
+            raise exception.ViolinInvalidBackendConfig(
+                reason=_('No FCP targets found'))
+
+    def create_volume(self, volume):
+        """Creates a volume."""
+        self.common._create_lun(volume)
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        """Creates a volume from a snapshot."""
+        self.common._create_volume_from_snapshot(snapshot, volume)
+
+    def create_cloned_volume(self, volume, src_vref):
+        """Creates a clone of the specified volume."""
+        self.common._create_lun_from_lun(src_vref, volume)
+
+    def delete_volume(self, volume):
+        """Deletes a volume."""
+        self.common._delete_lun(volume)
+
+    def extend_volume(self, volume, new_size):
+        """Extend an existing volume's size."""
+        self.common._extend_lun(volume, new_size)
+
+    def create_snapshot(self, snapshot):
+        """Creates a snapshot."""
+        self.common._create_lun_snapshot(snapshot)
+
+    def delete_snapshot(self, snapshot):
+        """Deletes a snapshot."""
+        self.common._delete_lun_snapshot(snapshot)
+
+    def ensure_export(self, context, volume):
+        """Synchronously checks and re-exports volumes at cinder start time."""
+        pass
+
+    def create_export(self, context, volume):
+        """Exports the volume."""
+        pass
+
+    def remove_export(self, context, volume):
+        """Removes an export for a logical volume."""
+        pass
+
+    @fczm_utils.AddFCZone
+    def initialize_connection(self, volume, connector):
+        """Allow connection to connector and return connection info."""
+
+        LOG.debug("Initialize_connection: initiator - %(initiator)s  host - "
+                  "%(host)s wwpns - %(wwpns)s",
+                  {'initiator': connector['initiator'],
+                   'host': connector['host'],
+                   'wwpns': connector['wwpns']})
+
+        self.common.vmem_mg.client.create_client(
+            name=connector['host'], proto='FC', fc_wwns=connector['wwpns'])
+
+        lun_id = self._export_lun(volume, connector)
+
+        target_wwns, init_targ_map = self._build_initiator_target_map(
+            connector)
+
+        properties = {}
+        properties['target_discovered'] = True
+        properties['target_wwn'] = target_wwns
+        properties['target_lun'] = lun_id
+        properties['access_mode'] = 'rw'
+        properties['initiator_target_map'] = init_targ_map
+
+        LOG.debug("Return FC data for zone addition: %(properties)s.",
+                  {'properties': properties})
+
+        return {'driver_volume_type': 'fibre_channel', 'data': properties}
+
+    @fczm_utils.RemoveFCZone
+    def terminate_connection(self, volume, connector, **kwargs):
+        """Terminates the connection (target<-->initiator)."""
+
+        self._unexport_lun(volume, connector)
+
+        properties = {}
+
+        if not self._is_initiator_connected_to_array(connector):
+            target_wwns, init_targ_map = self._build_initiator_target_map(
+                connector)
+            properties['target_wwn'] = target_wwns
+            properties['initiator_target_map'] = init_targ_map
+
+        LOG.debug("Return FC data for zone deletion: %(properties)s.",
+                  {'properties': properties})
+
+        return {'driver_volume_type': 'fibre_channel', 'data': properties}
+
+    def get_volume_stats(self, refresh=False):
+        """Get volume stats.
+
+        If 'refresh' is True, update the stats first.
+        """
+        if refresh or not self.stats:
+            self._update_volume_stats()
+        return self.stats
+
+    @utils.synchronized('vmem-export')
+    def _export_lun(self, volume, connector=None):
+        """Generates the export configuration for the given volume.
+
+        :param volume:  volume object provided by the Manager
+        :param connector:  connector object provided by the Manager
+        :returns: the LUN ID assigned by the backend
+        """
+        v = self.common.vmem_mg
+
+        if not connector:
+            raise exception.ViolinInvalidBackendConfig(
+                reason=_('No initiators found, cannot proceed'))
+
+        LOG.debug("Exporting lun %(vol_id)s - initiator wwpns %(i_wwpns)s "
+                  "- target wwpns %(t_wwpns)s.",
+                  {'vol_id': volume['id'], 'i_wwpns': connector['wwpns'],
+                   't_wwpns': self.gateway_fc_wwns})
+
+        try:
+            self.common._send_cmd_and_verify(
+                v.lun.assign_lun_to_client,
+                self._is_lun_id_ready,
+                "Assign SAN client successfully",
+                [volume['id'], connector['host'],
+                 "ReadWrite"],
+                [volume['id'], connector['host']])
+
+        except exception.ViolinBackendErr:
+            LOG.exception(_LE("Backend returned err for lun export."))
+            raise
+
+        except Exception:
+            raise exception.ViolinInvalidBackendConfig(
+                reason=_('LUN export failed!'))
+
+        lun_id = self._get_lun_id(volume['id'], connector['host'])
+        LOG.info(_LI("Exported lun %(vol_id)s on lun_id %(lun_id)s."),
+                 {'vol_id': volume['id'], 'lun_id': lun_id})
+
+        return lun_id
+
+    @utils.synchronized('vmem-export')
+    def _unexport_lun(self, volume, connector=None):
+        """Removes the export configuration for the given volume.
+
+        :param volume:  volume object provided by the Manager
+        :param connector:  connector object provided by the Manager
+        """
+        v = self.common.vmem_mg
+
+        LOG.info(_LI("Unexporting lun %s."), volume['id'])
+
+        try:
+            self.common._send_cmd(v.lun.unassign_client_lun,
+                                  "Unassign SAN client successfully",
+                                  volume['id'], connector['host'], True)
+
+        except exception.ViolinBackendErr:
+            LOG.exception(_LE("Backend returned err for lun export."))
+            raise
+
+        except Exception:
+            LOG.exception(_LE("LUN unexport failed!"))
+            raise
+
+    def _update_volume_stats(self):
+        """Gathers array stats and converts them to GB values."""
+        data = {}
+        total_gb = 0
+        free_gb = 0
+        v = self.common.vmem_mg.basic
+        array_name_triple = socket.gethostbyaddr(self.configuration.san_ip)
+        array_name = array_name_triple[0]
+
+        phy_devices = v.get("/batch/physicalresource/physicaldevice")
+
+        all_devices = phy_devices['data']['physical_devices']
+
+        for x in all_devices:
+            if socket.getfqdn(x['owner']) == array_name:
+                total_gb += x['size_mb'] / 1024
+                free_gb += x['availsize_mb'] / 1024
+
+        backend_name = self.configuration.volume_backend_name
+        data['volume_backend_name'] = backend_name or self.__class__.__name__
+        data['vendor_name'] = 'Violin Memory, Inc.'
+        data['driver_version'] = self.VERSION
+        data['storage_protocol'] = 'fibre_channel'
+        data['reserved_percentage'] = 0
+        data['QoS_support'] = False
+        data['total_capacity_gb'] = total_gb
+        data['free_capacity_gb'] = free_gb
+        for i in data:
+            LOG.debug("stat update: %(name)s=%(data)s",
+                      {'name': i, 'data': data[i]})
+
+        self.stats = data
+
+    def _get_active_fc_targets(self):
+        """Get a list of gateway WWNs that can be used as FCP targets.
+
+        :returns:  list of WWNs in OpenStack format
+        """
+        v = self.common.vmem_mg
+        active_gw_fcp_wwns = []
+
+        fc_info = v.adapter.get_fc_info()
+        for x in fc_info.values():
+            active_gw_fcp_wwns.append(x[0])
+
+        return active_gw_fcp_wwns
+
+    def _get_lun_id(self, volume_name, client_name):
+        """Get the lun ID for an exported volume.
+
+        If the lun is successfully assigned (exported) to a client, the
+        client info has the lun_id.
+
+        :param volume_name:  name of volume to query for lun ID
+        :param client_name:  name of client associated with the volume
+        :returns: integer value of lun ID
+        """
+        v = self.common.vmem_mg
+        lun_id = -1
+
+        client_info = v.client.get_client_info(client_name)
+
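+        # client_info['FibreChannelDevices'] is a list of dicts, one per
+        # lun assignment, each carrying 'name' and 'lun' keys; the lun id
+        # may come back as a string, hence the int() cast below.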
+        for x in client_info['FibreChannelDevices']:
+            if volume_name == x['name']:
+                lun_id = x['lun']
+                break
+
+        return int(lun_id)
+
+    def _is_lun_id_ready(self, volume_name, client_name):
+        """Get the lun ID for an exported volume.
+
+        If the lun is successfully assigned (exported) to a client, the
+        client info has the lun_id.
+
+        :param volume_name:  name of volume to query for lun ID
+        :param client_name:  name of client associated with the volume
+        :returns: Returns True if lun is ready, False otherwise
+        """
+
+        lun_id = -1
+        lun_id = self._get_lun_id(volume_name, client_name)
+        if lun_id != -1:
+            return True
+        else:
+            return False
+
+    def _build_initiator_target_map(self, connector):
+        """Build the target_wwns and the initiator target map."""
+        target_wwns = []
+        init_targ_map = {}
+
+        if self.lookup_service:
+            dev_map = self.lookup_service.get_device_mapping_from_network(
+                connector['wwpns'], self.gateway_fc_wwns)
+
+            for fabric_name in dev_map:
+                fabric = dev_map[fabric_name]
+                target_wwns += fabric['target_port_wwn_list']
+                for initiator in fabric['initiator_port_wwn_list']:
+                    if initiator not in init_targ_map:
+                        init_targ_map[initiator] = []
+                    init_targ_map[initiator] += fabric['target_port_wwn_list']
+                    init_targ_map[initiator] = list(
+                        set(init_targ_map[initiator]))
+
+            target_wwns = list(set(target_wwns))
+
+        else:
+            initiator_wwns = connector['wwpns']
+            target_wwns = self.gateway_fc_wwns
+            for initiator in initiator_wwns:
+                init_targ_map[initiator] = target_wwns
+
+        return target_wwns, init_targ_map
+
+    def _is_initiator_connected_to_array(self, connector):
+        """Check if any initiator wwns still have active sessions."""
+        v = self.common.vmem_mg
+
+        client = v.client.get_client_info(connector['host'])
+
+        if len(client['FibreChannelDevices']):
+            # each entry in the FibreChannelDevices array is a dict
+            # describing an active lun assignment
+            return True
+        return False