review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Add the StorPool block storage driver.
authorPeter Pentchev <openstack-dev@storpool.com>
Wed, 3 Dec 2014 12:49:23 +0000 (14:49 +0200)
committerPeter Pentchev <openstack-dev@storpool.com>
Mon, 15 Dec 2014 23:19:22 +0000 (01:19 +0200)
StorPool is distributed data storage software running on standard x86
servers.  StorPool aggregates the performance and capacity of all drives
into a shared pool of storage distributed among the servers.  Within
this storage pool the user creates thin-provisioned volumes that are
exposed to the clients as block devices.  StorPool consists of two parts
wrapped in one package - a server and a client.  The StorPool server
allows a hypervisor to act as a storage node, while the StorPool client
allows a hypervisor node to access the storage pool and act as a compute
node.  In OpenStack terms the StorPool solution allows each hypervisor
node to be both a storage and a compute node simultaneously.

To make full use of StorPool's native network communication protocol,
the Nova compute nodes will need to use the StorPool libvirt volume
attachment driver, nova.virt.libvirt.storpool.LibvirtStorPoolVolumeDriver.

DocImpact
Change-Id: I64adbca724a52771b68e4838749f8896e6e56019
Implements: blueprint storpool-block-driver

cinder/tests/test_storpool.py [new file with mode: 0644]
cinder/volume/drivers/storpool.py [new file with mode: 0644]

diff --git a/cinder/tests/test_storpool.py b/cinder/tests/test_storpool.py
new file mode 100644 (file)
index 0000000..d5e02cf
--- /dev/null
@@ -0,0 +1,393 @@
+# Copyright 2014 StorPool
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+import re
+import sys
+
+import mock
+from oslo.utils import units
+
+
# Stub out the "storpool" package before the driver module is imported,
# so the tests can run without the real StorPool API bindings installed.
fakeStorPool = mock.Mock()
fakeStorPool.spapi = mock.Mock()
fakeStorPool.spopenstack = mock.Mock()
fakeStorPool.sptypes = mock.Mock()
sys.modules['storpool'] = fakeStorPool
+
+
+from cinder import exception
+from cinder import test
+from cinder.volume import configuration as conf
+from cinder.volume.drivers import storpool as driver
+
+
# Fake extra specs per volume type: type 1 carries no StorPool settings,
# types 2 and 3 pin the 'ssd' and 'hdd' templates respectively.
volume_types = {
    1: {},
    2: {'storpool_template': 'ssd'},
    3: {'storpool_template': 'hdd'},
}

# Module-level backend state shared between MockAPI and the assertions.
volumes = {}
snapshots = {}
+
+
def MockExtraSpecs(vtype):
    """Stand-in for volume_types.get_volume_type_extra_specs().

    Looks the type ID up in the module-level volume_types dictionary.
    """
    return volume_types[vtype]
+
+
def mock_volume_types(f):
    """Decorator: run a test method with the extra-specs lookup mocked out.

    Patches cinder.volume.volume_types.get_volume_type_extra_specs() to
    return the fake specs from the volume_types dictionary above for the
    duration of the wrapped test method.
    """
    def _types_inner_inner1(inst, *args, **kwargs):
        @mock.patch('cinder.volume.volume_types.get_volume_type_extra_specs',
                    new=MockExtraSpecs)
        def _types_inner_inner2():
            return f(inst, *args, **kwargs)

        return _types_inner_inner2()

    return _types_inner_inner1
+
+
def volumeName(vid):
    """Build the StorPool volume name for an OpenStack volume ID."""
    return 'os--volume--%s' % (vid,)
+
+
def snapshotName(vtype, vid):
    """Build the StorPool snapshot name for a snapshot kind and ID."""
    return 'os--snap--%s--%s' % (vtype, vid)
+
+
class MockDisk(object):
    """A fake StorPool disk with a fixed allocation-group layout."""

    def __init__(self, diskId):
        self.id = diskId
        # -1 means the disk is fully recovered and usable; the stats code
        # skips disks with any other value.
        self.generationLeft = -1
        # 13 allocation groups in total: 1 allocated, 12 free.
        self.agCount, self.agFree, self.agAllocated = 13, 12, 1
+
+
class MockTemplate(object):
    """A fake StorPool template, identified only by its name."""

    def __init__(self, name):
        self.name = name
+
+
class MockApiError(Exception):
    """A fake spapi.ApiError.

    The real ApiError exposes the StorPool error name in a `name`
    attribute, and the driver inspects it (e.g. checking for
    'objectDoesNotExist' in delete_volume()/delete_snapshot()), so the
    mock provides it too.  The default keeps existing callers working.
    """

    def __init__(self, msg, name='invalidParam'):
        super(MockApiError, self).__init__(msg)
        self.name = name
+
+
class MockAPI(object):
    """A fake StorPool API client backed by the module-level dictionaries."""

    def __init__(self):
        self._disks = {}
        for diskId in (1, 2, 3, 4):
            self._disks[diskId] = MockDisk(diskId)
        # Mark disk 3 as still recovering; the driver must skip it when
        # calculating the capacity statistics.
        self._disks[3].generationLeft = 42

        self._templates = [MockTemplate(name) for name in ('ssd', 'hdd')]

    def setlog(self, log):
        self._log = log

    def disksList(self):
        return self._disks

    def snapshotCreate(self, vname, snap):
        # A snapshot is recorded as a frozen copy of the volume definition.
        snapshots[snap['name']] = dict(volumes[vname])

    def snapshotDelete(self, name):
        del snapshots[name]

    def volumeCreate(self, v):
        name = v['name']
        if name in volumes:
            raise MockApiError('volume already exists')
        volumes[name] = v

    def volumeDelete(self, name):
        del volumes[name]

    def volumeTemplatesList(self):
        return self._templates

    def volumesReassign(self, json):
        # Attach/detach bookkeeping is irrelevant for these tests.
        pass

    def volumeUpdate(self, name, size):
        volumes[name]['size'] = size['size']
+
+
class MockAttachDB(object):
    """A fake spopenstack.AttachDB exposing only what the driver uses."""

    def __init__(self, log):
        # The log argument is accepted for interface compatibility only.
        self._api = MockAPI()

    def api(self):
        """Return the shared fake API client."""
        return self._api

    def volumeName(self, vid):
        """Delegate to the module-level volumeName() helper."""
        return volumeName(vid)

    def snapshotName(self, vtype, vid):
        """Delegate to the module-level snapshotName() helper."""
        return snapshotName(vtype, vid)
+
+
def MockVolumeUpdateDesc(size):
    """Stand-in for sptypes.VolumeUpdateDesc: a plain size dictionary."""
    return {'size': size}
+
+
# Wire the stubbed-out storpool.* modules to the fake implementations
# above, so the driver module under test picks them up transparently.
fakeStorPool.spapi.ApiError = MockApiError
fakeStorPool.spopenstack.AttachDB = MockAttachDB
fakeStorPool.sptypes.VolumeUpdateDesc = MockVolumeUpdateDesc
+
+
class StorPoolTestCase(test.TestCase):
    """Unit tests for the StorPool Cinder volume driver."""

    def setUp(self):
        # Build the driver with a mocked Configuration and a mock executor;
        # the StorPool API itself is faked via sys.modules above.
        super(StorPoolTestCase, self).setUp()

        self.cfg = mock.Mock(spec=conf.Configuration)
        self.cfg.volume_backend_name = 'storpool_test'
        self.cfg.storpool_template = None
        self.cfg.storpool_replication = 3

        mock_exec = mock.Mock()
        mock_exec.return_value = ('', '')

        self.driver = driver.StorPoolDriver(execute=mock_exec,
                                            configuration=self.cfg)

    def test_initialized(self):
        """The setup check and the no-op connector/export methods succeed."""
        self.driver.check_for_setup_error()
        self.driver.validate_connector(None)
        self.driver.validate_connector(5)
        c = self.driver.initialize_connection(None, None)
        self.assertEqual('storpool', c['driver_volume_type'])
        self.assertDictEqual({}, c['data'])
        self.driver.terminate_connection(None, None)
        self.driver.create_export(None, None)
        self.driver.remove_export(None, None)

    def test_stats(self):
        """One pool is reported per StorPool template, plus 'default'."""
        stats = self.driver.get_volume_stats(refresh=True)
        self.assertEqual('StorPool', stats['vendor_name'])
        self.assertEqual('storpool', stats['storage_protocol'])
        self.assertListEqual(['default', 'template_hdd', 'template_ssd'],
                             sorted([p['pool_name'] for p in stats['pools']]))
        r = re.compile('^template_([A-Za-z0-9_]+)$')
        for pool in stats['pools']:
            # The figures follow from MockDisk: three usable disks (disk 3
            # is recovering), 13 allocation groups of 512 MB each, and the
            # free space divided by the replication factor of 3.
            self.assertEqual(19, pool['total_capacity_gb'])
            self.assertEqual(5, pool['free_capacity_gb'])
            if pool['pool_name'] != 'default':
                m = r.match(pool['pool_name'])
                self.assertIsNotNone(m)
                self.assertIsNotNone(m.group(1))
                self.assertEqual(m.group(1), pool['storpool_template'])

    def assertVolumeNames(self, names):
        """Assert that exactly the given volume IDs exist in the backend."""
        self.assertListEqual(sorted([volumeName(n) for n in names]),
                             sorted(volumes.keys()))

    @mock_volume_types
    def test_create_delete_volume(self):
        """create_volume() honors size, replication and template specs."""
        self.assertVolumeNames([])
        self.assertDictEqual({}, volumes)
        self.assertDictEqual({}, snapshots)

        self.driver.create_volume({'id': '1', 'name': 'v1', 'size': 1,
                                   'volume_type': None})
        # NOTE(review): dict.keys() is only a list on Python 2; on Python 3
        # this comparison would need list(volumes.keys()) -- confirm the
        # targeted Python versions.
        self.assertListEqual([volumeName('1')], volumes.keys())
        self.assertVolumeNames(('1',))
        v = volumes[volumeName('1')]
        self.assertEqual(1 * units.Gi, v['size'])
        self.assertNotIn('template', v.keys())
        self.assertEqual(3, v['replication'])

        # Recreating a volume that already exists must raise a wrapped
        # backend error (MockAPI.volumeCreate rejects duplicate names).
        caught = False
        try:
            self.driver.create_volume({'id': '1', 'name': 'v1', 'size': 0,
                                       'volume_type': None})
        except exception.VolumeBackendAPIException:
            caught = True
        self.assertEqual(True, caught)

        self.driver.delete_volume({'id': '1'})
        self.assertVolumeNames([])
        self.assertDictEqual({}, volumes)

        self.driver.create_volume({'id': '1', 'name': 'v1', 'size': 2,
                                   'volume_type': None})
        self.assertVolumeNames(('1',))
        v = volumes[volumeName('1')]
        self.assertEqual(2 * units.Gi, v['size'])
        self.assertNotIn('template', v.keys())
        self.assertEqual(3, v['replication'])

        # A volume type with no extra specs falls back to replication.
        self.driver.create_volume({'id': '2', 'name': 'v2', 'size': 3,
                                   'volume_type': {'id': 1}})
        self.assertVolumeNames(('1', '2'))
        v = volumes[volumeName('2')]
        self.assertEqual(3 * units.Gi, v['size'])
        self.assertNotIn('template', v.keys())
        self.assertEqual(3, v['replication'])

        # Types 2 and 3 select the 'ssd' and 'hdd' templates respectively;
        # a templated volume carries no explicit replication setting.
        self.driver.create_volume({'id': '3', 'name': 'v2', 'size': 4,
                                   'volume_type': {'id': 2}})
        self.assertVolumeNames(('1', '2', '3'))
        v = volumes[volumeName('3')]
        self.assertEqual(4 * units.Gi, v['size'])
        self.assertEqual('ssd', v['template'])
        self.assertNotIn('replication', v.keys())

        self.driver.create_volume({'id': '4', 'name': 'v2', 'size': 5,
                                   'volume_type': {'id': 3}})
        self.assertVolumeNames(('1', '2', '3', '4'))
        v = volumes[volumeName('4')]
        self.assertEqual(5 * units.Gi, v['size'])
        self.assertEqual('hdd', v['template'])
        self.assertNotIn('replication', v.keys())

        # Make sure the dictionary is not corrupted somehow...
        v = volumes[volumeName('1')]
        self.assertEqual(2 * units.Gi, v['size'])
        self.assertNotIn('template', v.keys())
        self.assertEqual(3, v['replication'])

        for vid in ('1', '2', '3', '4'):
            self.driver.delete_volume({'id': vid})
        self.assertVolumeNames([])
        self.assertDictEqual({}, volumes)
        self.assertDictEqual({}, snapshots)

    def test_clone_extend_volume(self):
        """extend_volume() resizes; create_cloned_volume() uses a snapshot."""
        self.assertVolumeNames([])
        self.assertDictEqual({}, volumes)
        self.assertDictEqual({}, snapshots)

        self.driver.create_volume({'id': '1', 'name': 'v1', 'size': 1,
                                   'volume_type': None})
        self.assertVolumeNames(('1',))
        self.driver.extend_volume({'id': '1'}, 2)
        self.assertEqual(2 * units.Gi, volumes[volumeName('1')]['size'])

        self.driver.create_cloned_volume({'id': '2', 'name': 'clo', 'size': 3},
                                         {'id': 1})
        self.assertVolumeNames(('1', '2'))
        self.assertDictEqual({}, snapshots)
        # Note: this would not be true in a real environment (the snapshot will
        # have been deleted, the volume would have no parent), but with this
        # fake implementation it helps us make sure that the second volume was
        # created with the proper options.
        self.assertEqual(volumes[volumeName('2')]['parent'],
                         snapshotName('clone', '2'))

        self.driver.delete_volume({'id': 1})
        self.driver.delete_volume({'id': 2})

        self.assertDictEqual({}, volumes)
        self.assertDictEqual({}, snapshots)

    @mock_volume_types
    def test_config_replication(self):
        """storpool_replication affects free space and untyped volumes."""
        self.assertVolumeNames([])
        self.assertDictEqual({}, volumes)
        self.assertDictEqual({}, snapshots)

        save_repl = self.driver.configuration.storpool_replication

        self.driver.configuration.storpool_replication = 3
        stats = self.driver.get_volume_stats(refresh=True)
        pool = stats['pools'][0]
        self.assertEqual(19, pool['total_capacity_gb'])
        self.assertEqual(5, pool['free_capacity_gb'])

        self.driver.create_volume({'id': 'cfgrepl1', 'name': 'v1', 'size': 1,
                                   'volume_type': None})
        self.assertVolumeNames(('cfgrepl1',))
        v = volumes[volumeName('cfgrepl1')]
        self.assertEqual(3, v['replication'])
        self.assertNotIn('template', v)
        self.driver.delete_volume({'id': 'cfgrepl1'})

        # Lowering the replication factor raises the apparent free space.
        self.driver.configuration.storpool_replication = 2
        stats = self.driver.get_volume_stats(refresh=True)
        pool = stats['pools'][0]
        self.assertEqual(19, pool['total_capacity_gb'])
        self.assertEqual(8, pool['free_capacity_gb'])

        self.driver.create_volume({'id': 'cfgrepl2', 'name': 'v1', 'size': 1,
                                   'volume_type': None})
        self.assertVolumeNames(('cfgrepl2',))
        v = volumes[volumeName('cfgrepl2')]
        self.assertEqual(2, v['replication'])
        self.assertNotIn('template', v)
        self.driver.delete_volume({'id': 'cfgrepl2'})

        # A volume type with a template overrides the replication setting.
        self.driver.create_volume({'id': 'cfgrepl3', 'name': 'v1', 'size': 1,
                                   'volume_type': {'id': 2}})
        self.assertVolumeNames(('cfgrepl3',))
        v = volumes[volumeName('cfgrepl3')]
        self.assertNotIn('replication', v)
        self.assertEqual('ssd', v['template'])
        self.driver.delete_volume({'id': 'cfgrepl3'})

        self.driver.configuration.storpool_replication = save_repl

        self.assertVolumeNames([])
        self.assertDictEqual({}, volumes)
        self.assertDictEqual({}, snapshots)

    @mock_volume_types
    def test_config_template(self):
        """storpool_template is the default; type extra specs override it."""
        self.assertVolumeNames([])
        self.assertDictEqual({}, volumes)
        self.assertDictEqual({}, snapshots)

        save_template = self.driver.configuration.storpool_template

        self.driver.configuration.storpool_template = None

        self.driver.create_volume({'id': 'cfgtempl1', 'name': 'v1', 'size': 1,
                                   'volume_type': None})
        self.assertVolumeNames(('cfgtempl1',))
        v = volumes[volumeName('cfgtempl1')]
        self.assertEqual(3, v['replication'])
        self.assertNotIn('template', v)
        self.driver.delete_volume({'id': 'cfgtempl1'})

        self.driver.create_volume({'id': 'cfgtempl2', 'name': 'v1', 'size': 1,
                                   'volume_type': {'id': 2}})
        self.assertVolumeNames(('cfgtempl2',))
        v = volumes[volumeName('cfgtempl2')]
        self.assertNotIn('replication', v)
        self.assertEqual('ssd', v['template'])
        self.driver.delete_volume({'id': 'cfgtempl2'})

        # With a configured default template, untyped volumes use it...
        self.driver.configuration.storpool_template = 'hdd'

        self.driver.create_volume({'id': 'cfgtempl3', 'name': 'v1', 'size': 1,
                                   'volume_type': None})
        self.assertVolumeNames(('cfgtempl3',))
        v = volumes[volumeName('cfgtempl3')]
        self.assertNotIn('replication', v)
        self.assertEqual('hdd', v['template'])
        self.driver.delete_volume({'id': 'cfgtempl3'})

        # ...but a type's own template still takes precedence.
        self.driver.create_volume({'id': 'cfgtempl4', 'name': 'v1', 'size': 1,
                                   'volume_type': {'id': 2}})
        self.assertVolumeNames(('cfgtempl4',))
        v = volumes[volumeName('cfgtempl4')]
        self.assertNotIn('replication', v)
        self.assertEqual('ssd', v['template'])
        self.driver.delete_volume({'id': 'cfgtempl4'})

        self.driver.configuration.storpool_template = save_template

        self.assertVolumeNames([])
        self.assertDictEqual({}, volumes)
        self.assertDictEqual({}, snapshots)
diff --git a/cinder/volume/drivers/storpool.py b/cinder/volume/drivers/storpool.py
new file mode 100644 (file)
index 0000000..f8cc49c
--- /dev/null
@@ -0,0 +1,333 @@
+#    Copyright (c) 2014 StorPool
+#    All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""StorPool block device driver"""
+
+from __future__ import absolute_import
+
+from oslo.config import cfg
+from oslo.utils import units
+import six
+
+from cinder import exception
+from cinder.i18n import _LE
+from cinder.openstack.common import log as logging
+from cinder.volume import driver
+from cinder.volume import volume_types
+
+LOG = logging.getLogger(__name__)
+
+from storpool import spapi
+from storpool import spopenstack
+from storpool import sptypes
+
+
# Configuration options registered on each backend section that uses this
# driver (see StorPoolDriver.__init__()).
storpool_opts = [
    cfg.StrOpt('storpool_template',
               default=None,
               help='The StorPool template for volumes with no type.'),
    cfg.IntOpt('storpool_replication',
               default=3,
               help='The default StorPool chain replication value.  '
                    'Used when creating a volume with no specified type if '
                    'storpool_template is not set.  Also used for calculating '
                    'the apparent free space reported in the stats.')
]
+
+
class StorPoolDriver(driver.VolumeDriver):
    """The StorPool block device driver using the StorPool API"""

    VERSION = '0.1.0'

    def __init__(self, *args, **kwargs):
        super(StorPoolDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(storpool_opts)
        # NOTE(review): these three fields are initialized but never read in
        # this module -- presumably reserved for later use; confirm.
        self._sp_config = None
        self._ourId = None
        self._ourIdInt = None
        # Attachment bookkeeping and API-client helper from the external
        # storpool.spopenstack integration package.
        self._attach = spopenstack.AttachDB(log=LOG)
+
    def _backendException(self, e):
        """Wrap a StorPool API error into a Cinder backend exception."""
        return exception.VolumeBackendAPIException(data=six.text_type(e))
+
+    def _template_from_volume_type(self, vtype):
+        specs = volume_types.get_volume_type_extra_specs(vtype['id'])
+        if specs is None:
+            return None
+        return specs.get('storpool_template', None)
+
+    def create_volume(self, volume):
+        size = int(volume['size']) * units.Gi
+        name = self._attach.volumeName(volume['id'])
+        if volume['volume_type'] is not None:
+            template = self._template_from_volume_type(volume['volume_type'])
+        else:
+            template = self.configuration.storpool_template
+        try:
+            if template is None:
+                self._attach.api().volumeCreate({
+                    'name': name,
+                    'size': size,
+                    'replication': self.configuration.storpool_replication
+                })
+            else:
+                self._attach.api().volumeCreate({
+                    'name': name,
+                    'size': size,
+                    'template': template
+                })
+        except spapi.ApiError as e:
+            raise self._backendException(e)
+
    def validate_connector(self, connector):
        """Accept any connector; no connector data is needed (see below)."""
        pass

    def initialize_connection(self, volume, connector):
        """Identify the volume type; the attachment itself is performed by
        the StorPool-specific Nova libvirt volume driver (see the commit
        message), so no connection data is passed back."""
        return {'driver_volume_type': 'storpool', 'data': {}}

    def terminate_connection(self, volume, connector, **kwargs):
        """No-op: no per-connection state is kept by this driver."""
        pass
+
+    def create_snapshot(self, snapshot):
+        volname = self._attach.volumeName(snapshot['volume_id'])
+        name = self._attach.snapshotName('snap', snapshot['id'])
+        try:
+            self._attach.api().snapshotCreate(volname, {'name': name})
+        except spapi.ApiError as e:
+            raise self._backendException(e)
+
+    def create_volume_from_snapshot(self, volume, snapshot):
+        size = int(volume['size']) * units.Gi
+        volname = self._attach.volumeName(volume['id'])
+        name = self._attach.snapshotName('snap', snapshot['id'])
+        try:
+            self._attach.api().volumeCreate({
+                'name': volname,
+                'size': size,
+                'parent': name
+            })
+        except spapi.ApiError as e:
+            raise self._backendException(e)
+
    def create_cloned_volume(self, volume, src_vref):
        """Clone a volume via a temporary 'clone' snapshot.

        The source volume is snapshotted, the new volume is created with
        that snapshot as its parent, and the snapshot is then deleted in
        all cases (StorPool keeps the data reachable through the parent
        chain); a failed snapshot deletion is only logged.
        """
        refname = self._attach.volumeName(src_vref['id'])
        snapname = self._attach.snapshotName('clone', volume['id'])
        try:
            self._attach.api().snapshotCreate(refname, {'name': snapname})
        except spapi.ApiError as e:
            raise self._backendException(e)

        size = int(volume['size']) * units.Gi
        volname = self._attach.volumeName(volume['id'])
        try:
            self._attach.api().volumeCreate({
                'name': volname,
                'size': size,
                'parent': snapname
            })
        except spapi.ApiError as e:
            raise self._backendException(e)
        finally:
            # Best effort only: a leaked temporary snapshot is logged,
            # not raised, so a successful clone is not reported as failed.
            try:
                self._attach.api().snapshotDelete(snapname)
            except spapi.ApiError as e:
                # ARGH!
                LOG.error(_LE("Could not delete the temp snapshot {n}: {msg}").
                          format(n=snapname, msg=six.text_type(e)))
+
    def create_export(self, context, volume):
        """No-op: StorPool volumes need no explicit export step."""
        pass

    def remove_export(self, context, volume):
        """No-op counterpart of create_export()."""
        pass
+
+    def delete_volume(self, volume):
+        name = self._attach.volumeName(volume['id'])
+        try:
+            self._attach.api().volumesReassign(
+                json=[{"volume": name, "detach": "all"}])
+            self._attach.api().volumeDelete(name)
+        except spapi.ApiError as e:
+            if e.name == 'objectDoesNotExist':
+                pass
+            else:
+                raise self._backendException(e)
+
+    def delete_snapshot(self, snapshot):
+        name = self._attach.snapshotName('snap', snapshot['id'])
+        try:
+            self._attach.api().volumesReassign(
+                json=[{"snapshot": name, "detach": "all"}])
+            self._attach.api().snapshotDelete(name)
+        except spapi.ApiError as e:
+            if e.name == 'objectDoesNotExist':
+                pass
+            else:
+                raise self._backendException(e)
+
    def check_for_setup_error(self):
        """Fail early if the StorPool API client cannot be instantiated."""
        try:
            self._attach.api()
        except Exception as e:
            LOG.error(_LE("StorPoolDriver API initialization failed: {e}").
                      format(e=e))
            raise
+
    def get_volume_stats(self, refresh=False):
        """Return the cached backend stats, recalculating them on request."""
        if refresh:
            self._update_volume_stats()

        return self._stats
+
+    def _update_volume_stats(self):
+        try:
+            dl = self._attach.api().disksList()
+            templates = self._attach.api().volumeTemplatesList()
+        except spapi.ApiError as e:
+            raise self._backendException(e)
+        total = 0
+        used = 0
+        free = 0
+        agSize = 512 * units.Mi
+        for (id, desc) in dl.iteritems():
+            if desc.generationLeft != -1:
+                continue
+            total += desc.agCount * agSize
+            used += desc.agAllocated * agSize
+            free += desc.agFree * agSize * 4096 / (4096 + 128)
+
+        # Report the free space as if all new volumes will be created
+        # with StorPool replication 3; anything else is rare.
+        free /= self.configuration.storpool_replication
+
+        space = {
+            'total_capacity_gb': total / units.Gi,
+            'free_capacity_gb': free / units.Gi,
+            'reserved_percentage': 0,
+            'QoS_support': False,
+        }
+
+        pools = [dict(space, pool_name='default')]
+
+        pools += [dict(space,
+                       pool_name='template_' + t.name,
+                       storpool_template=t.name
+                       ) for t in templates]
+
+        self._stats = {
+            'volume_backend_name': self.configuration.safe_get(
+                'volume_backend_name') or 'storpool',
+            'vendor_name': 'StorPool',
+            'driver_version': self.VERSION,
+            'storage_protocol': 'storpool',
+
+            'pools': pools
+        }
+
    def _attach_volume(self, context, volume, properties, remote=False):
        """Return the local device path for a previously registered attach.

        Relies on an attachment request having been stored under the
        context's request ID via self._attach.add() (see backup_volume()
        and the copy_*() methods below).
        """
        req_id = context.request_id
        req = self._attach.get()[req_id]
        name = req['volume']
        # NOTE(review): sync() is implemented in the external
        # storpool.spopenstack package; presumably it waits for the volume
        # to be attached on this host -- confirm.
        self._attach.sync(req_id, None)
        return {'device': {'path': '/dev/storpool/{v}'.format(v=name)}}
+
    def _detach_volume(self, context, attach_info, volume, properties,
                       force=False, remote=False):
        """Undo _attach_volume() for the attachment registered under the
        context's request ID, passing the volume name to sync()."""
        req_id = context.request_id
        req = self._attach.get()[req_id]
        name = req['volume']
        return self._attach.sync(req_id, name)
+
    def backup_volume(self, context, backup, backup_service):
        """Back up a volume through a temporary 'volsnap' snapshot.

        A snapshot of the volume is created and registered as this
        request's attachment, so that the base implementation's
        _attach_volume() call exposes the snapshot's device node; the
        registration and the snapshot are removed when the backup is done.
        """
        volume = self.db.volume_get(context, backup['volume_id'])
        req_id = context.request_id
        volname = self._attach.volumeName(volume['id'])
        name = self._attach.volsnapName(volume['id'], req_id)
        try:
            self._attach.api().snapshotCreate(volname, {'name': name})
        except spapi.ApiError as e:
            raise self._backendException(e)
        # Register a read-only (rights: 1) snapshot attachment.
        self._attach.add(req_id, {
            'volume': name,
            'type': 'backup',
            'id': req_id,
            'rights': 1,
            'volsnap': True
        })
        try:
            return super(StorPoolDriver, self).backup_volume(
                context, backup, backup_service)
        finally:
            self._attach.remove(req_id)
            # Best effort: a snapshot that cannot be removed is only logged.
            try:
                self._attach.api().snapshotDelete(name)
            except spapi.ApiError as e:
                LOG.error(
                    _LE('Could not remove the temp snapshot {n} for {v}: {e}').
                    format(n=name, v=volname, e=six.text_type(e)))
                pass
+
    def copy_volume_to_image(self, context, volume, image_service, image_meta):
        """Upload a volume to an image via a temporary 'volsnap' snapshot.

        Same pattern as backup_volume(): snapshot, register a read-only
        attachment under the request ID, run the base implementation, then
        clean up the registration and the snapshot.
        """
        req_id = context.request_id
        volname = self._attach.volumeName(volume['id'])
        name = self._attach.volsnapName(volume['id'], req_id)
        try:
            self._attach.api().snapshotCreate(volname, {'name': name})
        except spapi.ApiError as e:
            raise self._backendException(e)
        self._attach.add(req_id, {
            'volume': name,
            'type': 'copy-from',
            'id': req_id,
            'rights': 1,
            'volsnap': True
        })
        try:
            return super(StorPoolDriver, self).copy_volume_to_image(
                context, volume, image_service, image_meta)
        finally:
            self._attach.remove(req_id)
            # Best effort: a snapshot that cannot be removed is only logged.
            try:
                self._attach.api().snapshotDelete(name)
            except spapi.ApiError as e:
                LOG.error(
                    _LE('Could not remove the temp snapshot {n} for {v}: {e}').
                    format(n=name, v=volname, e=six.text_type(e)))
                pass
+
    def copy_image_to_volume(self, context, volume, image_service, image_id):
        """Write an image into a volume.

        The volume itself (no snapshot) is registered as a writable
        (rights: 2) attachment under the request ID for the duration of
        the base implementation's copy.
        """
        req_id = context.request_id
        name = self._attach.volumeName(volume['id'])
        self._attach.add(req_id, {
            'volume': name,
            'type': 'copy-to',
            'id': req_id,
            'rights': 2
        })
        try:
            return super(StorPoolDriver, self).copy_image_to_volume(
                context, volume, image_service, image_id)
        finally:
            self._attach.remove(req_id)
+
+    def extend_volume(self, volume, new_size):
+        size = int(new_size) * units.Gi
+        name = self._attach.volumeName(volume['id'])
+        try:
+            upd = sptypes.VolumeUpdateDesc(size=size)
+            self._attach.api().volumeUpdate(name, upd)
+        except spapi.ApiError as e:
+            raise self._backendException(e)