size_in_m = self.configuration.volume_clear_size
if not size_in_g:
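+ # No recorded size means there is nothing to wipe; warn and skip the secure delete.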
+ LOG.warning(_("Size for volume: %s not found, "
+ "skipping secure delete.") % volume['name'])
return
if self.configuration.volume_clear == 'none':
if self.configuration.volume_clear == 'zero':
if size_in_m == 0:
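+ # A clear size of 0 means zero-fill the entire volume from /dev/zero.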
- return self._copy_volume('/dev/zero', vol_path, size_in_g,
+ return self._copy_volume('/dev/zero',
+ vol_path, size_in_g,
clearing=True)
else:
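+ # Otherwise shred only the first size_in_m MiB: -n0 skips the random passes, -z adds a final zero pass.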
clear_cmd = ['shred', '-n0', '-z', '-s%dMiB' % size_in_m]
"""Deletes a snapshot."""
if self._volume_not_present(self._escape_snapshot(snapshot['name'])):
# If the snapshot isn't present, then don't attempt to delete
+ LOG.warning(_("snapshot: %s not found, "
+ "skipping delete operations") % snapshot['name'])
return True
# TODO(yamahata): zeroing out the whole snapshot triggers COW.
</target>
""" % (name, path, chap_auth)
- LOG.info(_('Creating volume: %s') % vol_id)
+ LOG.info(_('Creating iscsi_target for volume: %s') % vol_id)
volumes_dir = FLAGS.volumes_dir
volume_path = os.path.join(volumes_dir, vol_id)
return tid
def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
- LOG.info(_('Removing volume: %s') % vol_id)
+ LOG.info(_('Removing iscsi_target for volume: %s') % vol_id)
vol_uuid_file = FLAGS.volume_name_template % vol_id
volume_path = os.path.join(FLAGS.volumes_dir, vol_uuid_file)
if os.path.isfile(volume_path):
iqn,
run_as_root=True)
except exception.ProcessExecutionError, e:
- LOG.error(_("Failed to delete iscsi target for volume "
+ LOG.error(_("Failed to remove iscsi target for volume "
"id:%(vol_id)s.") % locals())
raise exception.ISCSITargetRemoveFailed(volume_id=vol_id)
return tid
def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
- LOG.info(_('Removing volume: %s') % vol_id)
+ LOG.info(_('Removing iscsi_target for volume: %s') % vol_id)
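+ # Remove the logical unit first, then tear down the target itself.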
self._delete_logicalunit(tid, lun, **kwargs)
self._delete_target(tid, **kwargs)
vol_uuid_file = FLAGS.volume_name_template % vol_id
vol_id = name.split(':')[1]
- LOG.info(_('Creating volume: %s') % vol_id)
+ LOG.info(_('Creating iscsi_target for volume: %s') % vol_id)
# cinder-rtstool requires chap_auth, but unit tests don't provide it
chap_auth_userid = 'test_id'
return tid
def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
- LOG.info(_('Removing volume: %s') % vol_id)
+ LOG.info(_('Removing iscsi_target for volume: %s') % vol_id)
vol_uuid_name = 'volume-%s' % vol_id
iqn = '%s%s' % (FLAGS.iscsi_target_prefix, vol_uuid_name)
"configuration to the new path."), volume_driver)
volume_driver = MAPPING[volume_driver]
self.driver = importutils.import_object(
- volume_driver,
- configuration=self.configuration)
+ volume_driver,
+ configuration=self.configuration)
# update_service_capabilities needs service_name to be volume
super(VolumeManager, self).__init__(service_name='volume',
*args, **kwargs)
filter_properties = {}
volume_ref = self.db.volume_get(context, volume_id)
self._notify_about_volume_usage(context, volume_ref, "create.start")
- LOG.info(_("volume %s: creating"), volume_ref['name'])
# NOTE(vish): so we don't have to get volume from db again
# before passing it to the driver.
image_meta = None
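+ # Log which source the volume is created from (snapshot, existing volume, image, or blank).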
if snapshot_id is not None:
+ LOG.info(_("volume %s: creating from snapshot"),
+ volume_ref['name'])
snapshot_ref = self.db.snapshot_get(context, snapshot_id)
elif source_volid is not None:
+ LOG.info(_("volume %s: creating from existing volume"),
+ volume_ref['name'])
sourcevol_ref = self.db.volume_get(context, source_volid)
elif image_id is not None:
+ LOG.info(_("volume %s: creating from image"),
+ volume_ref['name'])
# create the volume from an image
image_service, image_id = \
glance.get_remote_image_service(context,
image_id)
image_location = image_service.get_location(context, image_id)
image_meta = image_service.show(context, image_id)
+ else:
+ LOG.info(_("volume %s: creating"), volume_ref['name'])
try:
model_update, cloned = self._create_volume(context,
with excutils.save_and_reraise_exception():
self.db.volume_update(context,
volume_ref['id'], {'status': 'error'})
+ LOG.error(_("volume %s: create failed"), volume_ref['name'])
if snapshot_id:
# Copy any Glance metadata from the original volume
self.db.volume_update(context,
volume_ref['id'], {'status': status,
'launched_at': now})
- LOG.debug(_("volume %s: created successfully"), volume_ref['name'])
+ LOG.info(_("volume %s: created successfully"), volume_ref['name'])
self._reset_stats()
self._notify_about_volume_usage(context, volume_ref, "create.end")
"""Deletes and unexports volume."""
context = context.elevated()
volume_ref = self.db.volume_get(context, volume_id)
+ LOG.info(_("volume %s: deleting"), volume_ref['name'])
if volume_ref['attach_status'] == "attached":
# Volume is still attached, need to detach first
raise exception.VolumeAttached(volume_id=volume_id)
if volume_ref['host'] != self.host:
raise exception.InvalidVolume(
- reason=_("Volume is not local to this node"))
+ reason=_("volume is not local to this node"))
self._notify_about_volume_usage(context, volume_ref, "delete.start")
self._reset_stats()
self.db.volume_glance_metadata_delete_by_volume(context, volume_id)
self.db.volume_destroy(context, volume_id)
- LOG.debug(_("volume %s: deleted successfully"), volume_ref['name'])
+ LOG.info(_("volume %s: deleted successfully"), volume_ref['name'])
self._notify_about_volume_usage(context, volume_ref, "delete.end")
# Commit the reservations
self.db.volume_glance_metadata_copy_to_snapshot(context,
snapshot_ref['id'],
volume_id)
- LOG.debug(_("snapshot %s: created successfully"), snapshot_ref['name'])
+ LOG.info(_("snapshot %s: created successfully"), snapshot_ref['name'])
return snapshot_id
def delete_snapshot(self, context, snapshot_id):
"""Deletes and unexports snapshot."""
context = context.elevated()
snapshot_ref = self.db.snapshot_get(context, snapshot_id)
+ LOG.info(_("snapshot %s: deleting"), snapshot_ref['name'])
try:
LOG.debug(_("snapshot %s: deleting"), snapshot_ref['name'])
self.db.volume_glance_metadata_delete_by_snapshot(context, snapshot_id)
self.db.snapshot_destroy(context, snapshot_id)
- LOG.debug(_("snapshot %s: deleted successfully"), snapshot_ref['name'])
+ LOG.info(_("snapshot %s: deleted successfully"), snapshot_ref['name'])
return True
def attach_volume(self, context, volume_id, instance_uuid, mountpoint):