# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import eventlet
from heat.openstack.common import cfg
from heat.openstack.common import importutils
logger = logging.getLogger(__name__)
+from heat.common import exception
from heat.common import heat_keystoneclient as hkc
from novaclient import client as novaclient
try:
logger.info('quantumclient not available')
try:
from cinderclient.v1 import client as cinderclient
+ from cinderclient import exceptions as cinder_exceptions
except ImportError:
cinderclient = None
logger.info('cinderclient not available')
return self._cinder
+ def attach_volume_to_instance(self, server_id, volume_id, device_id):
+ logger.warn('Attaching InstanceId %s VolumeId %s Device %s' %
+ (server_id, volume_id, device_id))
+
+ va = self.nova().volumes.create_server_volume(
+ server_id=server_id,
+ volume_id=volume_id,
+ device=device_id)
+
+ vol = self.cinder().volumes.get(va.id)
+ while vol.status == 'available' or vol.status == 'attaching':
+ eventlet.sleep(1)
+ vol.get()
+ if vol.status == 'in-use':
+ return va.id
+ else:
+ raise exception.Error(vol.status)
+
    def detach_volume_from_instance(self, server_id, volume_id):
        """Detach a cinder volume from a nova server, best-effort.

        Waits until cinder no longer reports the volume 'in-use'.
        Missing volumes or attachments are logged and ignored so that
        delete paths stay idempotent.

        :param server_id: ID of the nova server the volume is attached to.
        :param volume_id: ID of the cinder volume to detach.
        """
        logger.info('VolumeAttachment un-attaching %s %s' %
                    (server_id, volume_id))

        try:
            vol = self.cinder().volumes.get(volume_id)
        except cinder_exceptions.NotFound:
            # Volume is already gone - nothing to detach.
            logger.warning('Volume %s - not found' %
                           (volume_id))
            return
        try:
            self.nova().volumes.delete_server_volume(server_id,
                                                     volume_id)
        except novaclient.exceptions.NotFound:
            # Attachment already removed on the nova side; deliberately
            # fall through to confirm the volume status below.
            logger.warning('Deleting VolumeAttachment %s %s - not found' %
                           (server_id, volume_id))
        try:
            logger.info('un-attaching %s, status %s' % (volume_id, vol.status))
            while vol.status == 'in-use':
                logger.info('trying to un-attach %s, but still %s' %
                            (volume_id, vol.status))
                eventlet.sleep(1)
                try:
                    # Re-issue the detach on every poll; failures here are
                    # intentionally swallowed (best-effort retry).
                    self.nova().volumes.delete_server_volume(
                        server_id,
                        volume_id)
                except Exception:
                    pass
                vol.get()
            logger.info('volume status of %s now %s' % (volume_id, vol.status))
        except cinder_exceptions.NotFound:
            # Volume was deleted while we were polling.
            logger.warning('Volume %s - not found' %
                           (volume_id))
+
if cfg.CONF.cloud_backend:
cloud_backend_module = importutils.import_module(cfg.CONF.cloud_backend)
'AllowedValues': ['dedicated', 'default'],
'Implemented': False},
'UserData': {'Type': 'String'},
- 'Volumes': {'Type': 'List',
- 'Implemented': False}}
+ 'Volumes': {'Type': 'List'}}
# template keys supported for handle_update, note trailing comma
# is required for a single item to get a tuple not a string
('nova reported unexpected',
self.name, server.status))
+ if self.properties['Volumes']:
+ self.attach_volumes()
+
+ def attach_volumes(self):
+ server_id = self.resource_id
+ for vol in self.properties['Volumes']:
+ if 'DeviceId' in vol:
+ dev = vol['DeviceId']
+ else:
+ dev = vol['Device']
+ self.stack.clients.attach_volume_to_instance(server_id,
+ vol['VolumeId'],
+ dev)
+
+ def detach_volumes(self):
+ server_id = self.resource_id
+ for vol in self.properties['Volumes']:
+ self.stack.clients.detach_volume_from_instance(server_id,
+ vol['VolumeId'])
+
def handle_update(self, json_snippet):
status = self.UPDATE_REPLACE
try:
'''
if self.resource_id is None:
return
+
+ if self.properties['Volumes']:
+ self.detach_volumes()
+
try:
server = self.nova().servers.get(self.resource_id)
except clients.novaclient.exceptions.NotFound:
import eventlet
from heat.openstack.common import log as logging
-from heat.engine import clients
from heat.common import exception
from heat.engine import resource
def handle_create(self):
    """Create the attachment via the shared OpenStack clients helper.

    The returned attachment ID becomes this resource's physical id.
    """
    server_id = self.properties['InstanceId']
    volume_id = self.properties['VolumeId']
    device = self.properties['Device']
    attachment_id = self.stack.clients.attach_volume_to_instance(server_id,
                                                                 volume_id,
                                                                 device)
    self.resource_id_set(attachment_id)
def handle_update(self, json_snippet):
    """Any property change requires replacing the attachment resource."""
    return self.UPDATE_REPLACE
def handle_delete(self):
    """Delete the attachment by detaching the volume from its instance.

    Delegates to the shared clients helper, which is idempotent when the
    volume or attachment is already gone.
    """
    self.stack.clients.detach_volume_from_instance(
        self.properties['InstanceId'],
        self.properties['VolumeId'])
def resource_mapping():
from heat.common import template_format
from heat.engine import parser
from heat.engine.resources import volume as vol
+from heat.engine import clients
from heat.tests.v1_1 import fakes
def setUp(self):
self.m = mox.Mox()
self.fc = fakes.FakeClient()
- self.m.StubOutWithMock(vol.Volume, 'cinder')
- self.m.StubOutWithMock(vol.VolumeAttachment, 'cinder')
- self.m.StubOutWithMock(vol.VolumeAttachment, 'nova')
+ self.m.StubOutWithMock(clients.OpenStackClients, 'cinder')
+ self.m.StubOutWithMock(clients.OpenStackClients, 'nova')
self.m.StubOutWithMock(self.fc.volumes, 'create')
self.m.StubOutWithMock(self.fc.volumes, 'get')
self.m.StubOutWithMock(self.fc.volumes, 'delete')
stack_name = 'test_volume_stack'
# create script
- vol.Volume.cinder().MultipleTimes().AndReturn(self.fc)
+ clients.OpenStackClients.cinder().MultipleTimes().AndReturn(self.fc)
self.fc.volumes.create(
u'1', display_description='%s.DataVolume' % stack_name,
display_name='%s.DataVolume' % stack_name).AndReturn(fv)
stack_name = 'test_volume_create_error_stack'
# create script
- vol.Volume.cinder().AndReturn(self.fc)
+ clients.OpenStackClients.cinder().AndReturn(self.fc)
self.fc.volumes.create(
u'1', display_description='%s.DataVolume' % stack_name,
display_name='%s.DataVolume' % stack_name).AndReturn(fv)
stack_name = 'test_volume_attach_error_stack'
# volume create
- vol.Volume.cinder().MultipleTimes().AndReturn(self.fc)
+ clients.OpenStackClients.cinder().MultipleTimes().AndReturn(self.fc)
self.fc.volumes.create(
u'1', display_description='%s.DataVolume' % stack_name,
display_name='%s.DataVolume' % stack_name).AndReturn(fv)
# create script
- vol.VolumeAttachment.nova().MultipleTimes().AndReturn(self.fc)
- vol.VolumeAttachment.cinder().MultipleTimes().AndReturn(self.fc)
+ clients.OpenStackClients.nova().MultipleTimes().AndReturn(self.fc)
+# clients.OpenStackClients.cinder().MultipleTimes().AndReturn(self.fc)
eventlet.sleep(1).MultipleTimes().AndReturn(None)
self.fc.volumes.create_server_volume(
stack_name = 'test_volume_attach_stack'
# volume create
- vol.Volume.cinder().MultipleTimes().AndReturn(self.fc)
+ clients.OpenStackClients.cinder().MultipleTimes().AndReturn(self.fc)
self.fc.volumes.create(
u'1', display_description='%s.DataVolume' % stack_name,
display_name='%s.DataVolume' % stack_name).AndReturn(fv)
# create script
- vol.VolumeAttachment.nova().MultipleTimes().AndReturn(self.fc)
- vol.VolumeAttachment.cinder().MultipleTimes().AndReturn(self.fc)
+ clients.OpenStackClients.nova().MultipleTimes().AndReturn(self.fc)
+ #clients.OpenStackClients.cinder().MultipleTimes().AndReturn(self.fc)
eventlet.sleep(1).MultipleTimes().AndReturn(None)
self.fc.volumes.create_server_volume(
device=u'/dev/vdc',
--- /dev/null
+{
+ "AWSTemplateFormatVersion" : "2010-09-09",
+
+ "Description" : "This template creates an instance and an EBS Volume.",
+
+ "Parameters" : {
+
+ "KeyName" : {
+ "Description" : "Name of an existing EC2 KeyPair to enable SSH access to the instance",
+ "Type" : "String"
+ },
+
+ "InstanceType" : {
+ "Description" : "WebServer EC2 instance type",
+ "Type" : "String",
+ "Default" : "m1.large",
+ "AllowedValues" : [ "t1.micro", "m1.small", "m1.large", "m1.xlarge", "m2.xlarge", "m2.2xlarge", "m2.4xlarge", "c1.medium", "c1.xlarge", "cc1.4xlarge" ],
+ "ConstraintDescription" : "must be a valid EC2 instance type."
+ },
+
+ "VolumeSize" : {
+ "Description" : "WikiDatabase Volume size",
+ "Type" : "Number",
+ "Default" : "1",
+ "MinValue" : "1",
+ "MaxValue" : "1024",
+ "ConstraintDescription" : "must be between 1 and 1024 Gb."
+ },
+
+ "LinuxDistribution": {
+ "Default": "F17",
+ "Description" : "Distribution of choice",
+ "Type": "String",
+ "AllowedValues" : [ "F16", "F17", "U10", "RHEL-6.1", "RHEL-6.2", "RHEL-6.3" ]
+ }
+ },
+
+ "Mappings" : {
+ "AWSInstanceType2Arch" : {
+ "t1.micro" : { "Arch" : "32" },
+ "m1.small" : { "Arch" : "32" },
+ "m1.large" : { "Arch" : "64" },
+ "m1.xlarge" : { "Arch" : "64" },
+ "m2.xlarge" : { "Arch" : "64" },
+ "m2.2xlarge" : { "Arch" : "64" },
+ "m2.4xlarge" : { "Arch" : "64" },
+ "c1.medium" : { "Arch" : "32" },
+ "c1.xlarge" : { "Arch" : "64" },
+ "cc1.4xlarge" : { "Arch" : "64" }
+ },
+ "DistroArch2AMI": {
+ "F16" : { "32" : "F16-i386-cfntools", "64" : "F16-x86_64-cfntools" },
+ "F17" : { "32" : "F17-i386-cfntools", "64" : "F17-x86_64-cfntools" },
+ "U10" : { "32" : "U10-i386-cfntools", "64" : "U10-x86_64-cfntools" },
+ "RHEL-6.1" : { "32" : "rhel61-i386-cfntools", "64" : "rhel61-x86_64-cfntools" },
+ "RHEL-6.2" : { "32" : "rhel62-i386-cfntools", "64" : "rhel62-x86_64-cfntools" },
+ "RHEL-6.3" : { "32" : "rhel63-i386-cfntools", "64" : "rhel63-x86_64-cfntools" }
+ }
+ },
+
+ "Resources" : {
+ "Ec2Instance" : {
+ "Type" : "AWS::EC2::Instance",
+ "Properties" : {
+ "SecurityGroups" : [ { "Ref" : "InstanceSecurityGroup" } ],
+ "ImageId" : { "Fn::FindInMap" : [ "DistroArch2AMI", { "Ref" : "LinuxDistribution" },
+ { "Fn::FindInMap" : [ "AWSInstanceType2Arch", { "Ref" : "InstanceType" }, "Arch" ] } ] },
+ "InstanceType" : { "Ref" : "InstanceType" },
+ "KeyName" : { "Ref" : "KeyName" },
+ "Volumes" : [
+ { "VolumeId" : { "Ref" : "NewVolume" },
+ "Device" : "/dev/vdc1"
+ }
+ ]
+ }
+ },
+
+ "InstanceSecurityGroup" : {
+ "Type" : "AWS::EC2::SecurityGroup",
+ "Properties" : {
+ "GroupDescription" : "Enable SSH access via port 22",
+ "SecurityGroupIngress" : [ {
+ "IpProtocol" : "tcp",
+ "FromPort" : "22",
+ "ToPort" : "22",
+ "CidrIp" : "0.0.0.0/0"
+ }]
+ }
+ },
+
+ "NewVolume" : {
+ "Type" : "AWS::EC2::Volume",
+ "Properties" : {
+ "Size" : { "Ref" : "VolumeSize" },
+ "AvailabilityZone" : { "Fn::GetAtt" : [ "Ec2Instance", "AvailabilityZone" ]}
+ }
+ }
+ },
+
+ "Outputs" : {
+ "InstanceId" : {
+ "Description" : "InstanceId of the newly created EC2 instance",
+ "Value" : { "Ref" : "Ec2Instance" }
+ },
+ "PublicIP" : {
+ "Description" : "Public IP address of the newly created EC2 instance",
+ "Value" : { "Fn::GetAtt" : [ "Ec2Instance", "PublicIp" ] }
+ },
+ "PublicDNS" : {
+ "Description" : "Public DNSName of the newly created EC2 instance",
+ "Value" : { "Fn::GetAtt" : [ "Ec2Instance", "PublicDnsName" ] }
+ }
+ }
+}