]> review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Re-add DRBD driver
authorPhilipp Marek <philipp.marek@linbit.com>
Tue, 16 Jun 2015 06:45:44 +0000 (08:45 +0200)
committerPhilipp Marek <philipp.marek@linbit.com>
Tue, 16 Jun 2015 06:45:44 +0000 (08:45 +0200)
This reverts commit 37eef0b3d4d786d4b2b1a7e48f3e2339506e85e2.

We hope to have a -infra CI running by June 12th, to get included
in Liberty.

Change-Id: Icb7347d55dc5b22720269405b6d2b4053c2ad819

cinder/tests/test_drbdmanagedrv.py [new file with mode: 0644]
cinder/volume/drivers/drbdmanagedrv.py [new file with mode: 0644]

diff --git a/cinder/tests/test_drbdmanagedrv.py b/cinder/tests/test_drbdmanagedrv.py
new file mode 100644 (file)
index 0000000..8f91dc6
--- /dev/null
@@ -0,0 +1,318 @@
+# Copyright (c) 2014 LINBIT HA Solutions GmbH
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import collections
+
+import mock
+from oslo_log import log as logging
+from oslo_utils import importutils
+from oslo_utils import timeutils
+
+from cinder import context
+from cinder import test
+from cinder.volume import configuration as conf
+
+
class mock_dbus(object):
    """Stand-in for the ``dbus`` module; only what the driver touches."""

    def __init__(self):
        """No DBus connection is made; nothing to set up."""
        pass

    @staticmethod
    def Array(defaults, signature=None):
        """Mimic dbus.Array by handing back the plain Python sequence.

        The ``signature`` argument is accepted (the driver passes it)
        but has no effect here.
        """
        return defaults
+
+
class mock_dm_utils(object):
    """Stand-in for ``drbdmanage.utils``."""

    @staticmethod
    def dict_to_aux_props(x):
        """Identity: the tests inspect the raw dict, not DBus props."""
        return x
+
+
class mock_dm_const(object):
    """Stand-in for ``drbdmanage.consts``; only the text-query opcode is used."""

    TQ_GET_PATH = "get_path"
+
+
class mock_dm_exc(object):
    """Stand-in for ``drbdmanage.exceptions``: just the return codes used.

    Values mirror the drbdmanage error-code convention: 0 is success,
    small positive numbers are specific errors.
    """

    DM_SUCCESS = 0
    DM_EEXIST = 1
    DM_ENOENT = 2
    DM_ERROR = 1000
    # NOTE(review): a stray ``pass`` statement after the attributes was
    # dead code and has been removed.
+
+
import sys
# NOTE: these stub registrations must happen *before* the driver import
# below, so the driver's ``import dbus`` / ``import drbdmanage.*``
# statements resolve to the fakes instead of the real packages.
sys.modules['dbus'] = mock_dbus
sys.modules['drbdmanage'] = collections.namedtuple(
    'module', ['consts', 'exceptions', 'utils'])
sys.modules['drbdmanage.utils'] = collections.namedtuple(
    'module', ['dict_to_aux_props'])
sys.modules['drbdmanage.consts'] = collections.namedtuple(
    'module', [])
sys.modules['drbdmanage.exceptions'] = collections.namedtuple(
    'module', ['DM_EEXIST'])


# Imported only now, after the stub modules are in place.
from cinder.volume.drivers import drbdmanagedrv


LOG = logging.getLogger(__name__)
+
+
def create_configuration(_unused=None):
    """Return a mocked Configuration object suitable for the driver.

    NOTE(review): the original signature shadowed the ``object`` builtin
    with an unused parameter (now optional for backward compatibility),
    and called ``mock.MockObject`` / ``mock.IgnoreArg``, which do not
    exist in the ``mock`` library (they are mox APIs) — any call would
    have raised AttributeError.
    """
    configuration = mock.Mock(conf.Configuration)
    configuration.san_is_local = False
    configuration.append_config_values(mock.ANY)
    return configuration
+
+
class DrbdManageFakeDriver(object):
    """Fake DRBDmanage DBus proxy.

    Records every call in ``self.calls`` (method name plus arguments)
    and returns canned DRBDmanage-style answers: a list of
    ``(code, text, args)`` triples, optionally with result data.
    Volume/snapshot IDs beginning with "deadbeef" are treated as
    non-existent.
    """

    resources = {}

    def __init__(self):
        self.calls = []

    def list_resources(self, res, serial, prop, req):
        self.calls.append(["list_resources", res, prop, req])
        if 'cinder-id' in prop and prop['cinder-id'].startswith("deadbeef"):
            return ([[mock_dm_exc.DM_ENOENT, "none", []]],
                    [])
        else:
            return ([[mock_dm_exc.DM_SUCCESS, "ACK", []]],
                    [("res", dict(prop))])

    def create_resource(self, res, props):
        self.calls.append(["create_resource", res, props])
        return [[mock_dm_exc.DM_SUCCESS, "ack", []]]

    def create_volume(self, res, size, props):
        self.calls.append(["create_volume", res, size, props])
        return [[mock_dm_exc.DM_SUCCESS, "ack", []]]

    def auto_deploy(self, res, red, delta, site_clients):
        self.calls.append(["auto_deploy", res, red, delta, site_clients])
        # One success triple per requested replica.
        # NOTE(review): the original ``[[...] * red]`` multiplied the
        # *elements of the inner triple*, yielding a single malformed
        # entry for red > 1; replicate the whole triple instead.
        return [[mock_dm_exc.DM_SUCCESS, "ack", []]] * red

    def list_volumes(self, res, ser, prop, req):
        self.calls.append(["list_volumes", res, ser, prop, req])
        if 'cinder-id' in prop and prop['cinder-id'].startswith("deadbeef"):
            return ([[mock_dm_exc.DM_SUCCESS, "none", []]],
                    [])
        else:
            # One resource with a single volume, number 2.
            return ([[mock_dm_exc.DM_SUCCESS, "ACK", []]],
                    [("res", dict(), [(2, dict(prop))])
                     ])

    def remove_volume(self, res, nr, force):
        self.calls.append(["remove_volume", res, nr, force])
        return [[mock_dm_exc.DM_SUCCESS, "ack", []]]

    def text_query(self, cmd):
        self.calls.append(["text_query", cmd])
        if cmd[0] == mock_dm_const.TQ_GET_PATH:
            return ([(mock_dm_exc.DM_SUCCESS, "ack", [])], ['/dev/drbd0'])
        return ([(mock_dm_exc.DM_ERROR, 'unknown command', [])], [])

    def list_assignments(self, nodes, res, ser, prop, req):
        self.calls.append(["list_assignments", nodes, res, ser, prop, req])
        if 'cinder-id' in prop and prop['cinder-id'].startswith("deadbeef"):
            return ([[mock_dm_exc.DM_SUCCESS, "none", []]],
                    [])
        else:
            return ([[mock_dm_exc.DM_SUCCESS, "ACK", []]],
                    [("node", "res", dict(), [(2, dict(prop))])
                     ])

    def create_snapshot(self, res, snap, nodes, props):
        self.calls.append(["create_snapshot", res, snap, nodes, props])
        return [[mock_dm_exc.DM_SUCCESS, "ack", []]]

    def list_snapshots(self, res, sn, serial, prop, req):
        self.calls.append(["list_snapshots", res, sn, serial, prop, req])
        if 'cinder-id' in prop and prop['cinder-id'].startswith("deadbeef"):
            return ([[mock_dm_exc.DM_SUCCESS, "none", []]],
                    [])
        else:
            return ([[mock_dm_exc.DM_SUCCESS, "ACK", []]],
                    [("res", [("snap", dict(prop))])
                     ])

    def remove_snapshot(self, res, snap, force):
        self.calls.append(["remove_snapshot", res, snap, force])
        return [[mock_dm_exc.DM_SUCCESS, "ack", []]]

    def resize_volume(self, res, vol, ser, size, delta):
        self.calls.append(["resize_volume", res, vol, ser, size, delta])
        return [[mock_dm_exc.DM_SUCCESS, "ack", []]]

    def restore_snapshot(self, res, snap, new, rprop, vprops):
        self.calls.append(["restore_snapshot", res, snap, new, rprop, vprops])
        return [[mock_dm_exc.DM_SUCCESS, "ack", []]]
+
+
class DrbdManageTestCase(test.TestCase):
    """Unit tests for the DRBDmanage volume driver.

    The driver is exercised against :class:`DrbdManageFakeDriver`, and
    each test inspects the recorded DBus call sequence.
    """

    def setUp(self):
        self.ctxt = context.get_admin_context()
        self._mock = mock.Mock()
        self.configuration = mock.Mock(conf.Configuration)
        self.configuration.san_is_local = True
        self.configuration.reserved_percentage = 1

        super(DrbdManageTestCase, self).setUp()

        self.stubs.Set(importutils, 'import_object',
                       self.fake_import_object)
        self.stubs.Set(drbdmanagedrv.DrbdManageDriver,
                       'call_or_reconnect',
                       self.fake_issue_dbus_call)
        self.stubs.Set(drbdmanagedrv.DrbdManageDriver,
                       'dbus_connect',
                       self.fake_issue_dbus_connect)

        # Point the driver module's drbdmanage imports at the stub
        # classes defined at the top of this file.
        sys.modules['cinder.volume.drivers.drbdmanagedrv'].dm_const \
            = mock_dm_const
        sys.modules['cinder.volume.drivers.drbdmanagedrv'].dm_utils \
            = mock_dm_utils
        sys.modules['cinder.volume.drivers.drbdmanagedrv'].dm_exc \
            = mock_dm_exc

        self.configuration.safe_get = lambda x: 'fake'

    # Infrastructure
    def fake_import_object(self, what, configuration, db, executor):
        # The target driver is irrelevant for these tests.
        return None

    def fake_issue_dbus_call(self, fn, *args):
        # NOTE(review): the Python-2-only builtin apply() was replaced
        # by argument unpacking for Python 3 compatibility.
        return fn(*args)

    def fake_issue_dbus_connect(self):
        self.odm = DrbdManageFakeDriver()

    def call_or_reconnect(self, method, *params):
        # NOTE(review): apply() replaced by unpacking, as above.
        return method(*params)

    # Tests per se

    def test_create_volume(self):
        testvol = {'project_id': 'testprjid',
                   'name': 'testvol',
                   'size': 1,
                   'id': 'deadbeef-8068-11e4-98c0-5254008ea111',
                   'volume_type_id': 'drbdmanage',
                   'created_at': timeutils.utcnow()}

        dmd = drbdmanagedrv.DrbdManageDriver(configuration=self.configuration)
        dmd.odm = DrbdManageFakeDriver()
        dmd.create_volume(testvol)
        self.assertEqual("create_resource", dmd.odm.calls[0][0])
        self.assertEqual("list_volumes", dmd.odm.calls[1][0])
        self.assertEqual("create_volume", dmd.odm.calls[2][0])
        # 1 GiB expressed in kiB.
        self.assertEqual(1048576, dmd.odm.calls[2][2])
        self.assertEqual("auto_deploy", dmd.odm.calls[3][0])

    def test_delete_volume(self):
        testvol = {'project_id': 'testprjid',
                   'name': 'testvol',
                   'size': 1,
                   'id': 'ba253fd0-8068-11e4-98c0-5254008ea111',
                   'volume_type_id': 'drbdmanage',
                   'created_at': timeutils.utcnow()}

        dmd = drbdmanagedrv.DrbdManageDriver(configuration=self.configuration)
        dmd.odm = DrbdManageFakeDriver()
        dmd.delete_volume(testvol)
        self.assertEqual("list_volumes", dmd.odm.calls[0][0])
        self.assertEqual(testvol['id'], dmd.odm.calls[0][3]["cinder-id"])
        self.assertEqual("remove_volume", dmd.odm.calls[1][0])

    def test_local_path(self):
        testvol = {'project_id': 'testprjid',
                   'name': 'testvol',
                   'size': 1,
                   'id': 'ba253fd0-8068-11e4-98c0-5254008ea111',
                   'volume_type_id': 'drbdmanage',
                   'created_at': timeutils.utcnow()}

        dmd = drbdmanagedrv.DrbdManageDriver(configuration=self.configuration)
        dmd.odm = DrbdManageFakeDriver()
        data = dmd.local_path(testvol)
        self.assertTrue(data.startswith("/dev/drbd"))

    def test_create_snapshot(self):
        testsnap = {'id': 'ca253fd0-8068-11e4-98c0-5254008ea111',
                    'volume_id': 'ba253fd0-8068-11e4-98c0-5254008ea111'}

        dmd = drbdmanagedrv.DrbdManageDriver(configuration=self.configuration)
        dmd.odm = DrbdManageFakeDriver()
        dmd.create_snapshot(testsnap)
        self.assertEqual("list_volumes", dmd.odm.calls[0][0])
        self.assertEqual("list_assignments", dmd.odm.calls[1][0])
        self.assertEqual("create_snapshot", dmd.odm.calls[2][0])
        self.assertTrue('node' in dmd.odm.calls[2][3])

    def test_delete_snapshot(self):
        testsnap = {'id': 'ca253fd0-8068-11e4-98c0-5254008ea111'}

        dmd = drbdmanagedrv.DrbdManageDriver(configuration=self.configuration)
        dmd.odm = DrbdManageFakeDriver()
        dmd.delete_snapshot(testsnap)
        self.assertEqual("list_snapshots", dmd.odm.calls[0][0])
        self.assertEqual("remove_snapshot", dmd.odm.calls[1][0])

    def test_extend_volume(self):
        testvol = {'project_id': 'testprjid',
                   'name': 'testvol',
                   'size': 1,
                   'id': 'ba253fd0-8068-11e4-98c0-5254008ea111',
                   'volume_type_id': 'drbdmanage',
                   'created_at': timeutils.utcnow()}

        dmd = drbdmanagedrv.DrbdManageDriver(configuration=self.configuration)
        dmd.odm = DrbdManageFakeDriver()
        dmd.extend_volume(testvol, 5)
        self.assertEqual("list_volumes", dmd.odm.calls[0][0])
        self.assertEqual(testvol['id'], dmd.odm.calls[0][3]["cinder-id"])
        self.assertEqual("resize_volume", dmd.odm.calls[1][0])
        self.assertEqual("res", dmd.odm.calls[1][1])
        self.assertEqual(2, dmd.odm.calls[1][2])
        self.assertEqual(-1, dmd.odm.calls[1][3])
        # 5 GiB expressed in kiB.
        self.assertEqual(5242880, dmd.odm.calls[1][4]['size'])

    def test_create_cloned_volume(self):
        srcvol = {'project_id': 'testprjid',
                  'name': 'testvol',
                  'size': 1,
                  'id': 'ba253fd0-8068-11e4-98c0-5254008ea111',
                  'volume_type_id': 'drbdmanage',
                  'created_at': timeutils.utcnow()}

        newvol = {'id': 'ca253fd0-8068-11e4-98c0-5254008ea111'}

        dmd = drbdmanagedrv.DrbdManageDriver(configuration=self.configuration)
        dmd.odm = DrbdManageFakeDriver()
        dmd.create_cloned_volume(newvol, srcvol)
        self.assertEqual("list_volumes", dmd.odm.calls[0][0])
        self.assertEqual("list_assignments", dmd.odm.calls[1][0])
        self.assertEqual("create_snapshot", dmd.odm.calls[2][0])
        self.assertEqual("list_snapshots", dmd.odm.calls[3][0])
        self.assertEqual("restore_snapshot", dmd.odm.calls[4][0])
        self.assertEqual("list_snapshots", dmd.odm.calls[5][0])
        # NOTE(review): a literally duplicated assertion on calls[6]
        # was removed.
        self.assertEqual("remove_snapshot", dmd.odm.calls[6][0])
diff --git a/cinder/volume/drivers/drbdmanagedrv.py b/cinder/volume/drivers/drbdmanagedrv.py
new file mode 100644 (file)
index 0000000..8350c12
--- /dev/null
@@ -0,0 +1,528 @@
+# Copyright (c) 2014 LINBIT HA Solutions GmbH
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+
+"""
+
+This driver connects Cinder to an installed DRBDmanage instance, see
+  http://oss.linbit.com/drbdmanage/
+  http://git.linbit.com/drbdmanage.git/
+for more details.
+
+"""
+
+import six
+import uuid
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_utils import importutils
+from oslo_utils import units
+
+
+from cinder import exception
+from cinder.i18n import _, _LW, _LI
+from cinder.volume import driver
+
+try:
+    import dbus
+    import drbdmanage.consts as dm_const
+    import drbdmanage.exceptions as dm_exc
+    import drbdmanage.utils as dm_utils
+except ImportError:
+    dbus = None
+    dm_const = None
+    dm_exc = None
+    dm_utils = None
+
+
+LOG = logging.getLogger(__name__)
+
drbd_opts = [
    # NOTE(review): declared as IntOpt (was StrOpt with default '1');
    # the value is numeric and the driver converts it with int() anyway,
    # so this is backward compatible for existing config files.
    cfg.IntOpt('drbdmanage_redundancy',
               default=1,
               help='Number of nodes that should replicate the data.'),
    # TODO(PM): offsite_redundancy?
    # TODO(PM): choose DRBDmanage storage pool?
]
+
+
CONF = cfg.CONF
CONF.register_opts(drbd_opts)


# Auxiliary-property key under which the Cinder volume/snapshot ID is
# stored on DRBDmanage objects.
CINDER_AUX_PROP_id = "cinder-id"
DM_VN_PREFIX = 'CV_'  # sadly 2CV isn't allowed by DRBDmanage
+
+
class DrbdManageDriver(driver.VolumeDriver):
    """Cinder driver that uses DRBDmanage for storage.

    Each Cinder volume is stored as a volume inside a DRBD resource.
    Control-plane operations go through the DRBDmanage DBus service;
    the data path (export) is delegated to the configured target driver.
    """

    VERSION = '1.0.0'
    # Default DBus service name / object path of the DRBDmanage daemon.
    drbdmanage_dbus_name = 'org.drbd.drbdmanaged'
    drbdmanage_dbus_interface = '/interface'

    def __init__(self, *args, **kwargs):
        # Empty DBus containers, reused as "no filter" arguments.
        self.empty_list = dbus.Array([], signature="a(s)")
        self.empty_dict = dbus.Array([], signature="a(ss)")
        super(DrbdManageDriver, self).__init__(*args, **kwargs)
        self.configuration.append_config_values(drbd_opts)
        if not self.drbdmanage_dbus_name:
            self.drbdmanage_dbus_name = 'org.drbd.drbdmanaged'
        if not self.drbdmanage_dbus_interface:
            self.drbdmanage_dbus_interface = '/interface'
        self.drbdmanage_redundancy = int(getattr(self.configuration,
                                                 'drbdmanage_redundancy', 1))
        self.dm_control_vol = ".drbdctrl"

        # Copied from the LVM driver, see
        # I43190d1dac33748fe55fa00f260f32ab209be656
        target_driver = self.target_mapping[
            self.configuration.safe_get('iscsi_helper')]

        LOG.debug('Attempting to initialize DRBD driver with the '
                  'following target_driver: %s',
                  target_driver)

        self.target_driver = importutils.import_object(
            target_driver,
            configuration=self.configuration,
            db=self.db,
            executor=self._execute)

    def dbus_connect(self):
        """(Re-)establish the DBus connection to the DRBDmanage daemon."""
        self.odm = dbus.SystemBus().get_object(self.drbdmanage_dbus_name,
                                               self.drbdmanage_dbus_interface)
        self.odm.ping()

    def call_or_reconnect(self, fn, *args):
        """Call DBUS function; on a disconnect try once to reconnect."""
        try:
            return fn(*args)
        except dbus.DBusException as e:
            LOG.warning(_LW("Got disconnected; trying to reconnect. (%s)"), e)
            self.dbus_connect()
            # Old function object is invalid, get new one.
            return getattr(self.odm, fn._method_name)(*args)

    def do_setup(self, context):
        """Any initialization the volume driver does while starting."""
        super(DrbdManageDriver, self).do_setup(context)
        self.dbus_connect()

    def check_for_setup_error(self):
        """Verify that requirements are in place to use DRBDmanage driver."""
        if not all((dbus, dm_exc, dm_const, dm_utils)):
            msg = _('DRBDmanage driver setup error: some required '
                    'libraries (dbus, drbdmanage.*) not found.')
            LOG.error(msg)
            raise exception.VolumeDriverException(message=msg)
        if self.odm.ping() != 0:
            message = _('Cannot ping DRBDmanage backend')
            raise exception.VolumeBackendAPIException(data=message)

    def _clean_uuid(self):
        """Return a new random UUID string, WITHOUT braces."""
        # Some uuid library versions put braces around the result!?
        # We don't want them, just a plain [0-9a-f-]+ string.
        # NOTE(review): str.translate(None, "{}") was Python-2-only, and
        # the local name shadowed the ``id`` builtin; both fixed.
        new_id = str(uuid.uuid4())
        return new_id.replace("{", "").replace("}", "")

    def _check_result(self, res, ignore=None, ret=0):
        """Scan a DRBDmanage result list for errors.

        ``res`` is a sequence of (code, format-string, args) triples.
        DM_SUCCESS and any code listed in ``ignore`` are accepted; any
        other code raises VolumeBackendAPIException (using the last
        error seen).  Returns ``ret`` (default 0) otherwise.
        """
        seen_error = None
        for (code, fmt, arg_l) in res:
            if code == dm_exc.DM_SUCCESS or (ignore and code in ignore):
                continue
            # Convert the DBUS argument list to a dict for %-formatting.
            arg = dict(arg_l)
            seen_error = _("Received error string: %s") % (fmt % arg)

        if seen_error:
            raise exception.VolumeBackendAPIException(data=seen_error)
        # NOTE(review): the original also tracked the first ignored code
        # and a success flag, but returned ``ret`` on every non-raising
        # path; the dead bookkeeping was removed.
        return ret

    # DRBDmanage works in kiB units; Cinder uses GiB.
    def _vol_size_to_dm(self, size):
        """Convert a size in GiB (Cinder) to kiB (DRBDmanage)."""
        return int(size * units.Gi / units.Ki)

    def _vol_size_to_cinder(self, size):
        """Convert a size in kiB (DRBDmanage) to GiB (Cinder)."""
        return int(size * units.Ki / units.Gi)

    def is_clean_volume_name(self, name):
        """Map a Cinder volume name/UUID to a DRBDmanage resource name.

        Accepts either a "<volume_name_template><uuid>" string or a bare
        UUID; returns the DM_VN_PREFIX'd name, or None if ``name`` does
        not parse as either form.
        """
        try:
            # NOTE(review): name[7:] assumes the rendered template
            # prefix (e.g. "volume-") is exactly 7 characters long --
            # TODO confirm against CONF.volume_name_template.
            if (name.startswith(CONF.volume_name_template % "") and
                    uuid.UUID(name[7:]) is not None):
                return DM_VN_PREFIX + name[7:]
        except ValueError:
            return None

        try:
            if uuid.UUID(name) is not None:
                return DM_VN_PREFIX + name
        except ValueError:
            return None

    def _priv_hash_from_volume(self, volume):
        """Build the aux-props dict that tags a DRBD object with its ID."""
        return dm_utils.dict_to_aux_props({
            CINDER_AUX_PROP_id: volume['id'],
        })

    def snapshot_name_from_cinder_snapshot(self, snapshot):
        """Return the DRBDmanage snapshot name for a Cinder snapshot."""
        sn_name = self.is_clean_volume_name(snapshot['id'])
        return sn_name

    def _res_and_vl_data_for_volume(self, volume, empty_ok=False):
        """Find DRBD resource and volume ID.

        A DRBD resource might consist of several "volumes"
        (think consistency groups).
        So we have to find the number of the volume within one resource.
        Returns resource name, volume number, and resource
        and volume properties.
        """

        # If we get a string, use it as-is.
        # Else it's a dictionary; then get the ID.
        if isinstance(volume, six.string_types):
            v_uuid = volume
        else:
            v_uuid = volume['id']

        res, rl = self.call_or_reconnect(self.odm.list_volumes,
                                         self.empty_dict,
                                         0,
                                         dm_utils.dict_to_aux_props(
                                             {CINDER_AUX_PROP_id: v_uuid}),
                                         self.empty_dict)
        self._check_result(res)

        if not rl:
            if empty_ok:
                LOG.debug("No volume %s found.", v_uuid)
                return None, None, None, None
            raise exception.VolumeBackendAPIException(
                data=_("volume %s not found in drbdmanage") % v_uuid)
        if len(rl) > 1:
            raise exception.VolumeBackendAPIException(
                data=_("multiple resources with name %s found by drbdmanage") %
                v_uuid)

        (r_name, r_props, vols) = rl[0]
        if len(vols) != 1:
            raise exception.VolumeBackendAPIException(
                data=_("not exactly one volume with id %s") %
                v_uuid)

        (v_nr, v_props) = vols[0]

        LOG.debug("volume %(uuid)s is %(res)s/%(nr)d; %(rprop)s, %(vprop)s",
                  {'uuid': v_uuid, 'res': r_name, 'nr': v_nr,
                   'rprop': r_props, 'vprop': v_props})

        return r_name, v_nr, r_props, v_props

    def _resource_and_snap_data_from_snapshot(self, snapshot, empty_ok=False):
        """Find DRBD resource and snapshot name from the snapshot ID."""
        s_uuid = snapshot['id']
        res, rs = self.call_or_reconnect(self.odm.list_snapshots,
                                         self.empty_dict,
                                         self.empty_dict,
                                         0,
                                         dm_utils.dict_to_aux_props(
                                             {CINDER_AUX_PROP_id: s_uuid}),
                                         self.empty_dict)
        self._check_result(res)

        if not rs:
            if empty_ok:
                return None
            else:
                raise exception.VolumeBackendAPIException(
                    data=_("no snapshot with id %s found in drbdmanage") %
                    s_uuid)
        if len(rs) > 1:
            raise exception.VolumeBackendAPIException(
                data=_("multiple resources with snapshot ID %s found") %
                s_uuid)

        (r_name, snaps) = rs[0]
        if len(snaps) != 1:
            raise exception.VolumeBackendAPIException(
                data=_("not exactly one snapshot with id %s") % s_uuid)

        (s_name, s_props) = snaps[0]

        LOG.debug("snapshot %(uuid)s is %(res)s/%(snap)s",
                  {'uuid': s_uuid, 'res': r_name, 'snap': s_name})

        return r_name, s_name, s_props

    def _resource_name_volnr_for_volume(self, volume, empty_ok=False):
        """Return just (resource name, volume number) for a volume."""
        # NOTE(review): unpacking the unused fields into ``_`` clobbered
        # the i18n gettext function imported as ``_``; renamed.
        res, vol, _r_props, _v_props = self._res_and_vl_data_for_volume(
            volume, empty_ok)
        return res, vol

    def local_path(self, volume):
        """Return the local block-device path for a volume."""
        dres, dvol = self._resource_name_volnr_for_volume(volume)

        res, data = self.call_or_reconnect(self.odm.text_query,
                                           [dm_const.TQ_GET_PATH,
                                            dres,
                                            str(dvol)])
        self._check_result(res)
        if len(data) == 1:
            return data[0]
        message = _('Got bad path information from DRBDmanage! (%s)') % data
        raise exception.VolumeBackendAPIException(data=message)

    def create_volume(self, volume):
        """Creates a DRBD resource.

        We address it later on via the ID that gets stored
        as a private property.
        """

        # TODO(PM): consistency groups
        dres = self.is_clean_volume_name(volume['id'])

        res = self.call_or_reconnect(self.odm.create_resource,
                                     dres,
                                     self.empty_dict)
        self._check_result(res, ignore=[dm_exc.DM_EEXIST], ret=None)

        # If we get DM_EEXIST, then the volume already exists, eg. because
        # deploy gave an error on a previous try (like ENOSPC).
        # Still, there might or might not be the volume in the resource -
        # we have to check that explicitly.
        (_unused_res, drbd_vol) = self._resource_name_volnr_for_volume(
            volume, empty_ok=True)
        if not drbd_vol:
            props = self._priv_hash_from_volume(volume)
            # TODO(PM): properties - redundancy, etc
            res = self.call_or_reconnect(self.odm.create_volume,
                                         dres,
                                         self._vol_size_to_dm(volume['size']),
                                         props)
            self._check_result(res)

        # If we crashed between create_volume and the deploy call,
        # the volume might be defined but not exist on any server. Oh my.
        res = self.call_or_reconnect(self.odm.auto_deploy,
                                     dres, self.drbdmanage_redundancy,
                                     0, True)
        self._check_result(res)

        return 0

    def delete_volume(self, volume):
        """Deletes a resource."""
        dres, dvol = self._resource_name_volnr_for_volume(
            volume,
            empty_ok=True)

        if not dres:
            # OK, already gone.
            return True

        # TODO(PM): check if in use? Ask whether Primary, or just check result?
        res = self.call_or_reconnect(self.odm.remove_volume, dres, dvol, False)
        self._check_result(res, ignore=[dm_exc.DM_ENOENT])

        res, rl = self.call_or_reconnect(self.odm.list_volumes,
                                         [dres],
                                         0,
                                         self.empty_dict,
                                         self.empty_list)
        self._check_result(res)

        # We expect the _resource_ to be here still (we just got a volnr from
        # it!), so just query the volumes.
        # If the resource has no volumes anymore, the current DRBDmanage
        # version (errorneously, IMO) returns no *resource*, too.
        if len(rl) > 1:
            message = _('DRBDmanage expected one resource ("%(res)s"), '
                        'got %(n)d') % {'res': dres, 'n': len(rl)}
            raise exception.VolumeBackendAPIException(data=message)

        # Delete resource, if empty
        if (not rl) or (not rl[0]) or (len(rl[0][2]) == 0):
            res = self.call_or_reconnect(self.odm.remove_resource, dres, False)
            self._check_result(res, ignore=[dm_exc.DM_ENOENT])

    def create_volume_from_snapshot(self, volume, snapshot):
        """Creates a volume from a snapshot."""

        LOG.debug("create vol from snap: from %(snap)s make %(vol)s",
                  {'snap': snapshot['id'], 'vol': volume['id']})
        # TODO(PM): Consistency groups.
        dres, sname, sprop = self._resource_and_snap_data_from_snapshot(
            snapshot)

        new_res = self.is_clean_volume_name(volume['id'])

        r_props = self.empty_dict
        # TODO(PM): consistency groups => different volume number possible
        v_props = [(0, self._priv_hash_from_volume(volume))]

        res = self.call_or_reconnect(self.odm.restore_snapshot,
                                     new_res,
                                     dres,
                                     sname,
                                     r_props,
                                     v_props)
        return self._check_result(res, ignore=[dm_exc.DM_ENOENT])

    def create_cloned_volume(self, volume, src_vref):
        """Clone a volume via a temporary snapshot of the source."""
        temp_id = self._clean_uuid()
        snapshot = {'id': temp_id}

        # NOTE(review): ``dict(snapshot.items() + [...])`` was
        # Python-2-only (dict views cannot be concatenated with ``+``);
        # build the augmented dict explicitly instead.
        snap_with_vol = dict(snapshot)
        snap_with_vol['volume_id'] = src_vref['id']
        self.create_snapshot(snap_with_vol)

        self.create_volume_from_snapshot(volume, snapshot)

        self.delete_snapshot(snapshot)

    def _update_volume_stats(self):
        """Query free/total space and refresh the cached stats dict."""
        data = {}

        data["vendor_name"] = 'Open Source'
        data["driver_version"] = self.VERSION
        data["storage_protocol"] = self.target_driver.protocol
        # This has to match the name set in the cinder volume driver spec,
        # so keep it lowercase
        data["volume_backend_name"] = "drbdmanage"
        data["pools"] = []

        res, free, total = self.call_or_reconnect(self.odm.cluster_free_query,
                                                  self.drbdmanage_redundancy)
        self._check_result(res)

        location_info = ('DrbdManageDriver:%(cvol)s:%(dbus)s' %
                         {'cvol': self.dm_control_vol,
                          'dbus': self.drbdmanage_dbus_name})

        # TODO(PM): multiple DRBDmanage instances and/or multiple pools
        single_pool = {}
        single_pool.update(dict(
            pool_name=data["volume_backend_name"],
            free_capacity_gb=self._vol_size_to_cinder(free),
            total_capacity_gb=self._vol_size_to_cinder(total),
            reserved_percentage=self.configuration.reserved_percentage,
            location_info=location_info,
            QoS_support=False))

        data["pools"].append(single_pool)

        self._stats = data

    def get_volume_stats(self, refresh=True):
        """Get volume status."""

        self._update_volume_stats()
        return self._stats

    def extend_volume(self, volume, new_size):
        """Grow a volume to ``new_size`` GiB."""
        dres, dvol = self._resource_name_volnr_for_volume(volume)

        res = self.call_or_reconnect(self.odm.resize_volume,
                                     dres, dvol, -1,
                                     {"size": self._vol_size_to_dm(new_size)},
                                     0)
        self._check_result(res)
        return 0

    def create_snapshot(self, snapshot):
        """Creates a snapshot."""
        sn_name = self.snapshot_name_from_cinder_snapshot(snapshot)

        dres, dvol = self._resource_name_volnr_for_volume(
            snapshot["volume_id"])

        res, data = self.call_or_reconnect(self.odm.list_assignments,
                                           self.empty_dict,
                                           [dres],
                                           0,
                                           self.empty_dict,
                                           self.empty_dict)
        self._check_result(res)

        # NOTE(review): map() returns an iterator on Python 3, which has
        # no len(); materialize the node list instead.
        nodes = [d[0] for d in data]
        if not nodes:
            raise exception.VolumeBackendAPIException(
                _('Snapshot res "%s" that is not deployed anywhere?') %
                (dres))

        props = self._priv_hash_from_volume(snapshot)
        res = self.call_or_reconnect(self.odm.create_snapshot,
                                     dres, sn_name, nodes, props)
        self._check_result(res)

    def delete_snapshot(self, snapshot):
        """Deletes a snapshot."""

        # NOTE(review): the unused props field was unpacked into ``_``,
        # clobbering the i18n gettext function; renamed.
        dres, sname, _s_props = self._resource_and_snap_data_from_snapshot(
            snapshot, empty_ok=True)

        if not dres:
            # resource already gone?
            LOG.warning(_LW("snapshot: %s not found, "
                            "skipping delete operation"), snapshot['id'])
            LOG.info(_LI('Successfully deleted snapshot: %s'), snapshot['id'])
            return True

        res = self.call_or_reconnect(self.odm.remove_snapshot,
                                     dres, sname, True)
        return self._check_result(res, ignore=[dm_exc.DM_ENOENT])

    # #######  Interface methods for DataPath (Target Driver) ########

    def ensure_export(self, context, volume):
        volume_path = self.local_path(volume)
        return self.target_driver.ensure_export(
            context,
            volume,
            volume_path)

    def create_export(self, context, volume):
        volume_path = self.local_path(volume)
        export_info = self.target_driver.create_export(
            context,
            volume,
            volume_path)

        return {'provider_location': export_info['location'],
                'provider_auth': export_info['auth'], }

    def remove_export(self, context, volume):
        return self.target_driver.remove_export(context, volume)

    def initialize_connection(self, volume, connector):
        return self.target_driver.initialize_connection(volume, connector)

    def validate_connector(self, connector):
        return self.target_driver.validate_connector(connector)

    def terminate_connection(self, volume, connector, **kwargs):
        return None