review.fuel-infra Code Review - openstack-build/cinder-build.git/commitdiff
Fix ScaleIO driver does not honor clone size
author     Matan Sabag <matan.sabag@emc.com>
           Sun, 13 Mar 2016 11:19:49 +0000 (04:19 -0700)
committer  Matan Sabag <matan.sabag@emc.com>
           Sun, 13 Mar 2016 11:23:06 +0000 (04:23 -0700)
Fixed a bug where, when creating a volume from a source volume, the driver did not honor the new volume size.
Added a unit test.

Change-Id: I3a39953002432ac70a4ee2eccedd6737d9c3b019
Closes-Bug: #1554777
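
The fix below takes the ScaleIO snapshot exactly as before and then issues an extend when the requested clone size is larger than the source. A minimal standalone sketch of that decision, including the 8 GB rounding the driver applies before deciding whether a setVolumeSize request is actually sent (the helper names here are illustrative, not the driver's own code):

import math

def round_to_8_gran(size_gb):
    # ScaleIO allocates volumes in 8 GB increments, so sizes round up.
    return int(math.ceil(size_gb / 8.0) * 8)

def clone_needs_extend(src_size_gb, requested_size_gb):
    # Mirrors the fixed create_cloned_volume: extend only when the clone is
    # requested larger than the source, and only when the rounded sizes
    # actually differ (otherwise _extend_volume returns without a REST call).
    if requested_size_gb <= src_size_gb:
        return False
    return round_to_8_gran(requested_size_gb) != round_to_8_gran(src_size_gb)

# A 2 GB clone of a 1 GB source rounds to the same 8 GB slice, so no
# setVolumeSize request goes out; a 10 GB clone of an 8 GB source rounds
# to 16 GB and does trigger the extend.
print(clone_needs_extend(1, 2))   # False
print(clone_needs_extend(8, 10))  # True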

cinder/tests/unit/volume/drivers/emc/scaleio/test_create_cloned_volume.py
cinder/tests/unit/volume/drivers/emc/scaleio/test_extend_volume.py
cinder/volume/drivers/emc/scaleio.py

cinder/tests/unit/volume/drivers/emc/scaleio/test_create_cloned_volume.py
index a2ff534d6cddd6e0fa5eb5053ddd98d657af6c7c..e9ad27ec44ed887514357cfb45496abfa627bc8f 100644 (file)
@@ -98,3 +98,8 @@ class TestCreateClonedVolume(scaleio.TestScaleIODriver):
     def test_create_cloned_volume(self):
         self.set_https_response_mode(self.RESPONSE_MODE.Valid)
         self.driver.create_cloned_volume(self.new_volume, self.src_volume)
+
+    def test_create_cloned_volume_larger_size(self):
+        self.set_https_response_mode(self.RESPONSE_MODE.Valid)
+        self.new_volume.size = 2
+        self.driver.create_cloned_volume(self.new_volume, self.src_volume)
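
A possible follow-up, not part of this change, would be to assert that the larger-size path actually reaches the new extend logic rather than only checking that the call succeeds. A sketch of such a test method for TestCreateClonedVolume, assuming the same self.driver/self.new_volume/self.src_volume fixtures, a source volume smaller than 2 GB, and the mock library used elsewhere in the Cinder unit tests:

    def test_create_cloned_volume_larger_size_extends(self):
        # Hypothetical stricter variant of the test above: verify that the
        # driver delegates to _extend_volume when the clone is larger.
        self.set_https_response_mode(self.RESPONSE_MODE.Valid)
        self.new_volume.size = 2
        with mock.patch.object(self.driver, '_extend_volume') as m_extend:
            self.driver.create_cloned_volume(self.new_volume, self.src_volume)
        m_extend.assert_called_once_with(mock.ANY, self.src_volume.size,
                                         self.new_volume.size)
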
cinder/tests/unit/volume/drivers/emc/scaleio/test_extend_volume.py
index 2b1ab30d3e0ffe11c4b9c134e689555edea1bd1d..2b704f9586137a311971097f914399d0354527be 100644 (file)
@@ -52,7 +52,7 @@ class TestExtendVolume(scaleio.TestScaleIODriver):
                 self.volume_name_2x_enc: '"{}"'.format(self.volume.id),
                 'instances/Volume::{}/action/setVolumeSize'.format(
                     self.volume.provider_id
-                ): 'OK',
+                ): mocks.MockHTTPSResponse({}, 200),
             },
             self.RESPONSE_MODE.BadStatus: {
                 'types/Volume/instances/getByName::' +
cinder/volume/drivers/emc/scaleio.py
index 3ee76b8c462be2f22604eae0fa52120fc1d8f03b..e0b754f97a5d255b96a518c9266767abae4b67d3 100644 (file)
@@ -537,12 +537,15 @@ class ScaleIODriver(driver.VolumeDriver):
         This action will round up the volume to the nearest size that is
         a granularity of 8 GBs.
         """
-        vol_id = volume['provider_id']
+        return self._extend_volume(volume['provider_id'], volume.size,
+                                   new_size)
+
+    def _extend_volume(self, volume_id, old_size, new_size):
+        vol_id = volume_id
         LOG.info(_LI(
-                 "ScaleIO extend volume:"
-                 " volume %(volname)s to size %(new_size)s."),
-                 {'volname': vol_id,
-                  'new_size': new_size})
+            "ScaleIO extend volume: volume %(volname)s to size %(new_size)s."),
+            {'volname': vol_id,
+             'new_size': new_size})
 
         req_vars = {'server_ip': self.server_ip,
                     'server_port': self.server_port,
@@ -555,7 +558,7 @@ class ScaleIODriver(driver.VolumeDriver):
         # Round up the volume size so that it is a granularity of 8 GBs
         # because ScaleIO only supports volumes with a granularity of 8 GBs.
         volume_new_size = self._round_to_8_gran(new_size)
-        volume_real_old_size = self._round_to_8_gran(volume.size)
+        volume_real_old_size = self._round_to_8_gran(old_size)
         if volume_real_old_size == volume_new_size:
             return
 
@@ -566,14 +569,7 @@ class ScaleIODriver(driver.VolumeDriver):
                         volume_new_size)
 
         params = {'sizeInGB': six.text_type(volume_new_size)}
-        r = requests.post(
-            request,
-            data=json.dumps(params),
-            headers=self._get_headers(),
-            auth=(self.server_username,
-                  self.server_token),
-            verify=self._get_verify_cert())
-        r = self._check_response(r, request, False, params)
+        r, response = self._execute_scaleio_post_request(params, request)
 
         if r.status_code != OK_STATUS_CODE:
             response = r.json()
@@ -598,7 +594,11 @@ class ScaleIODriver(driver.VolumeDriver):
                  {'src': volume_id,
                   'tgt': snapname})
 
-        return self._snapshot_volume(volume_id, snapname)
+        ret = self._snapshot_volume(volume_id, snapname)
+        if volume.size > src_vref.size:
+            self._extend_volume(ret['provider_id'], src_vref.size, volume.size)
+
+        return ret
 
     def delete_volume(self, volume):
         """Deletes a self.logical volume"""