img_loc = (None,
[{'metadata':
{'share_location': 'nfs://host/path',
- 'mount_point': '/opt/stack/data/glance',
+ 'mountpoint': '/opt/stack/data/glance',
+ 'id': 'abc-123',
'type': 'nfs'},
'url': 'file:///opt/stack/data/glance/image-id'}])
- location = drv._construct_image_nfs_url(img_loc)
- if location != "nfs://host/path/image-id":
- self.fail("Unexpected direct url.")
+
+ locations = drv._construct_image_nfs_url(img_loc)
+
+ self.assertIn("nfs://host/path/image-id", locations)
def test_construct_image_url_direct(self):
drv = self._driver
img_loc = ("nfs://host/path/image-id", None)
- location = drv._construct_image_nfs_url(img_loc)
- if location != "nfs://host/path/image-id":
- self.fail("Unexpected direct url.")
+
+ locations = drv._construct_image_nfs_url(img_loc)
+
+ self.assertIn("nfs://host/path/image-id", locations)
def test_get_pool(self):
pool = self._driver.get_pool({'provider_location': 'fake-share'})
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._find_image_in_cache = mock.Mock(return_value=[])
- drv._construct_image_nfs_url = mock.Mock(return_value="")
+ drv._construct_image_nfs_url = mock.Mock(return_value=["nfs://1"])
drv._check_get_nfs_path_segs = mock.Mock(return_value=("test:test",
"dr"))
drv._get_ip_verify_on_cluster = mock.Mock(return_value="192.1268.1.1")
drv._execute.assert_called_once_with('cof_path', 'ip1', 'ip1',
'/openstack/img-cache-imgid',
'/exp_path/name',
- run_as_root=True,
+ run_as_root=False,
check_exit_code=0)
drv._post_clone_image.assert_called_with(volume)
drv._get_provider_location.assert_called_with('vol_id')
"""Clone directly in nfs share."""
LOG.info(_LI('Checking image clone %s from glance share.'), image_id)
cloned = False
- image_location = self._construct_image_nfs_url(image_location)
- share = self._is_cloneable_share(image_location)
+ image_locations = self._construct_image_nfs_url(image_location)
run_as_root = self._execute_as_root
- if share and self._is_share_vol_compatible(volume, share):
- LOG.debug('Share is cloneable %s', share)
- volume['provider_location'] = share
- (__, ___, img_file) = image_location.rpartition('/')
- dir_path = self._get_mount_point_for_share(share)
- img_path = '%s/%s' % (dir_path, img_file)
- img_info = image_utils.qemu_img_info(img_path,
- run_as_root=run_as_root)
- if img_info.file_format == 'raw':
- LOG.debug('Image is raw %s', image_id)
- self._clone_backing_file_for_volume(
- img_file, volume['name'],
- volume_id=None, share=share)
- cloned = True
- else:
- LOG.info(
- _LI('Image will locally be converted to raw %s'),
- image_id)
- dst = '%s/%s' % (dir_path, volume['name'])
- image_utils.convert_image(img_path, dst, 'raw',
- run_as_root=run_as_root)
- data = image_utils.qemu_img_info(dst, run_as_root=run_as_root)
- if data.file_format != "raw":
- raise exception.InvalidResults(
- _("Converted to raw, but"
- " format is now %s") % data.file_format)
- else:
+ for loc in image_locations:
+ share = self._is_cloneable_share(loc)
+ if share and self._is_share_vol_compatible(volume, share):
+ LOG.debug('Share is cloneable %s', share)
+ volume['provider_location'] = share
+ (__, ___, img_file) = loc.rpartition('/')
+ dir_path = self._get_mount_point_for_share(share)
+ img_path = '%s/%s' % (dir_path, img_file)
+ img_info = image_utils.qemu_img_info(img_path,
+ run_as_root=run_as_root)
+ if img_info.file_format == 'raw':
+ LOG.debug('Image is raw %s', image_id)
+ self._clone_backing_file_for_volume(
+ img_file, volume['name'],
+ volume_id=None, share=share)
cloned = True
- self._register_image_in_cache(
- volume, image_id)
+ break
+ else:
+ LOG.info(
+ _LI('Image will locally be converted to raw %s'),
+ image_id)
+ dst = '%s/%s' % (dir_path, volume['name'])
+ image_utils.convert_image(img_path, dst, 'raw',
+ run_as_root=run_as_root)
+ data = image_utils.qemu_img_info(dst,
+ run_as_root=run_as_root)
+ if data.file_format != "raw":
+ raise exception.InvalidResults(
+ _("Converted to raw, but"
+ " format is now %s") % data.file_format)
+ else:
+ cloned = True
+ self._register_image_in_cache(
+ volume, image_id)
+ break
return cloned
def _post_clone_image(self, volume):
It creates direct url from image_location
which is a tuple with direct_url and locations.
- Returns url with nfs scheme if nfs store
+ Returns a list of urls with nfs scheme if nfs store
else returns url. It needs to be verified
by backend before use.
"""
if not direct_url and not locations:
raise exception.NotFound(_('Image location not present.'))
- # Locations will be always a list of one until
- # bp multiple-image-locations is introduced
+ urls = []
if not locations:
- return direct_url
- location = locations[0]
- url = location['url']
- if not location['metadata']:
- return url
- location_type = location['metadata'].get('type')
- if not location_type or location_type.lower() != "nfs":
- return url
- share_location = location['metadata'].get('share_location')
- mount_point = location['metadata'].get('mount_point')
- if not share_location or not mount_point:
- return url
- url_parse = urllib.parse.urlparse(url)
- abs_path = os.path.join(url_parse.netloc, url_parse.path)
- rel_path = os.path.relpath(abs_path, mount_point)
- direct_url = "%s/%s" % (share_location, rel_path)
- return direct_url
+ urls.append(direct_url)
+ else:
+ for location in locations:
+ url = location['url']
+ if not location['metadata']:
+ urls.append(url)
+ break
+ location_type = location['metadata'].get('type')
+ if not location_type or location_type.lower() != "nfs":
+ urls.append(url)
+ break
+ share_location = location['metadata'].get('share_location')
+ mountpoint = location['metadata'].get('mountpoint')
+ if not share_location or not mountpoint:
+ urls.append(url)
+ break
+ url_parse = urllib.parse.urlparse(url)
+ abs_path = os.path.join(url_parse.netloc, url_parse.path)
+ rel_path = os.path.relpath(abs_path, mountpoint)
+ direct_url = "%s/%s" % (share_location, rel_path)
+ urls.append(direct_url)
+ return urls
def extend_volume(self, volume, new_size):
"""Extend an existing volume to the new size."""
volume['id']))
dst_path = os.path.join(
self._get_export_path(volume['id']), volume['name'])
+ # Always run copy offload as regular user, it's sufficient
+ # and rootwrap doesn't allow copy offload to run as root
+ # anyways.
self._execute(col_path, src_ip, dst_ip,
src_path, dst_path,
- run_as_root=self._execute_as_root,
+ run_as_root=False,
check_exit_code=0)
self._register_image_in_cache(volume, image_id)
LOG.debug("Copied image from cache to volume %s using"
"""Copies from the image service using copy offload."""
LOG.debug("Trying copy from image service using copy offload.")
image_loc = image_service.get_location(context, image_id)
- image_loc = self._construct_image_nfs_url(image_loc)
- conn, dr = self._check_get_nfs_path_segs(image_loc)
- if conn:
- src_ip = self._get_ip_verify_on_cluster(conn.split(':')[0])
- else:
+ locations = self._construct_image_nfs_url(image_loc)
+ src_ip = None
+ selected_loc = None
+ # Select the first location whose connection host has a valid IP
+ # on the cluster; remaining locations are ignored once one matches.
+ for location in locations:
+ conn, dr = self._check_get_nfs_path_segs(location)
+ if conn:
+ try:
+ src_ip = self._get_ip_verify_on_cluster(conn.split(':')[0])
+ selected_loc = location
+ break
+ except exception.NotFound:
+ pass
+ if src_ip is None:
raise exception.NotFound(_("Source host details not found."))
- (__, ___, img_file) = image_loc.rpartition('/')
+ (__, ___, img_file) = selected_loc.rpartition('/')
src_path = os.path.join(dr, img_file)
dst_ip = self._get_ip_verify_on_cluster(self._get_host_ip(
volume['id']))
('%s:%s' % (dst_ip, self._get_export_path(volume['id'])))):
dst_img_serv_path = os.path.join(
self._get_export_path(volume['id']), tmp_img_file)
+ # Always run copy offload as regular user, it's sufficient
+ # and rootwrap doesn't allow copy offload to run as root
+ # anyways.
self._execute(col_path, src_ip, dst_ip, src_path,
- dst_img_serv_path, run_as_root=run_as_root,
+ dst_img_serv_path, run_as_root=False,
check_exit_code=0)
else:
self._clone_file_dst_exists(dst_share, img_file, tmp_img_file)