From b46a0afc37ce2f93bcf3527593eec1097792cae5 Mon Sep 17 00:00:00 2001 From: John Griffith Date: Fri, 6 Jul 2012 18:28:53 -0600 Subject: [PATCH] Remove unused db api methods * Implements cinder blueprint remove-extra-dbapi-methods * Related to cinder initial-db-cleanup: Remove db api methods associated with removed tables Modify/Remove tests associated with removed tables Remove non-existing db API calls from volume/api Change-Id: I60e0c6ffad9c78a29d56a27a04e9ec5ebaa1f251 --- cinder/db/api.py | 938 +++--------------- cinder/db/sqlalchemy/api.py | 515 ++++------ cinder/openstack/common/timeutils.py | 109 ++ .../tests/notifier/test_capacity_notifier.py | 59 -- openstack-common.conf | 2 +- 5 files changed, 393 insertions(+), 1230 deletions(-) create mode 100644 cinder/openstack/common/timeutils.py delete mode 100644 cinder/tests/notifier/test_capacity_notifier.py diff --git a/cinder/db/api.py b/cinder/db/api.py index 8c0f58e47..8b736875c 100644 --- a/cinder/db/api.py +++ b/cinder/db/api.py @@ -74,12 +74,7 @@ IMPL = utils.LazyPluggable('db_backend', sqlalchemy='cinder.db.sqlalchemy.api') -class NoMoreNetworks(exception.Error): - """No more available networks.""" - pass - - -class NoMoreTargets(exception.Error): +class NoMoreTargets(exception.CinderException): """No more available targets""" pass @@ -160,198 +155,6 @@ def service_update(context, service_id, values): ################### - - -def compute_node_get(context, compute_id): - """Get an computeNode or raise if it does not exist.""" - return IMPL.compute_node_get(context, compute_id) - - -def compute_node_get_all(context): - """Get all computeNodes.""" - return IMPL.compute_node_get_all(context) - - -def compute_node_create(context, values): - """Create a computeNode from the values dictionary.""" - return IMPL.compute_node_create(context, values) - - -def compute_node_update(context, compute_id, values, auto_adjust=True): - """Set the given properties on an computeNode and update it. - - Raises NotFound if computeNode does not exist. 
- """ - return IMPL.compute_node_update(context, compute_id, values, auto_adjust) - - -def compute_node_get_by_host(context, host): - return IMPL.compute_node_get_by_host(context, host) - - -def compute_node_utilization_update(context, host, free_ram_mb_delta=0, - free_disk_gb_delta=0, work_delta=0, vm_delta=0): - return IMPL.compute_node_utilization_update(context, host, - free_ram_mb_delta, free_disk_gb_delta, work_delta, - vm_delta) - - -def compute_node_utilization_set(context, host, free_ram_mb=None, - free_disk_gb=None, work=None, vms=None): - return IMPL.compute_node_utilization_set(context, host, free_ram_mb, - free_disk_gb, work, vms) - -################### - - -def certificate_create(context, values): - """Create a certificate from the values dictionary.""" - return IMPL.certificate_create(context, values) - - -def certificate_get_all_by_project(context, project_id): - """Get all certificates for a project.""" - return IMPL.certificate_get_all_by_project(context, project_id) - - -def certificate_get_all_by_user(context, user_id): - """Get all certificates for a user.""" - return IMPL.certificate_get_all_by_user(context, user_id) - - -def certificate_get_all_by_user_and_project(context, user_id, project_id): - """Get all certificates for a user and project.""" - return IMPL.certificate_get_all_by_user_and_project(context, - user_id, - project_id) - - -################### - -def floating_ip_get(context, id): - return IMPL.floating_ip_get(context, id) - - -def floating_ip_get_pools(context): - """Returns a list of floating ip pools""" - return IMPL.floating_ip_get_pools(context) - - -def floating_ip_allocate_address(context, project_id, pool): - """Allocate free floating ip from specified pool and return the address. - - Raises if one is not available. - - """ - return IMPL.floating_ip_allocate_address(context, project_id, pool) - - -def floating_ip_create(context, values): - """Create a floating ip from the values dictionary.""" - return IMPL.floating_ip_create(context, values) - - -def floating_ip_count_by_project(context, project_id): - """Count floating ips used by project.""" - return IMPL.floating_ip_count_by_project(context, project_id) - - -def floating_ip_deallocate(context, address): - """Deallocate an floating ip by address.""" - return IMPL.floating_ip_deallocate(context, address) - - -def floating_ip_destroy(context, address): - """Destroy the floating_ip or raise if it does not exist.""" - return IMPL.floating_ip_destroy(context, address) - - -def floating_ip_disassociate(context, address): - """Disassociate an floating ip from a fixed ip by address. - - :returns: the address of the existing fixed ip. 
- - """ - return IMPL.floating_ip_disassociate(context, address) - - -def floating_ip_fixed_ip_associate(context, floating_address, - fixed_address, host): - """Associate an floating ip to a fixed_ip by address.""" - return IMPL.floating_ip_fixed_ip_associate(context, - floating_address, - fixed_address, - host) - - -def floating_ip_get_all(context): - """Get all floating ips.""" - return IMPL.floating_ip_get_all(context) - - -def floating_ip_get_all_by_host(context, host): - """Get all floating ips by host.""" - return IMPL.floating_ip_get_all_by_host(context, host) - - -def floating_ip_get_all_by_project(context, project_id): - """Get all floating ips by project.""" - return IMPL.floating_ip_get_all_by_project(context, project_id) - - -def floating_ip_get_by_address(context, address): - """Get a floating ip by address or raise if it doesn't exist.""" - return IMPL.floating_ip_get_by_address(context, address) - - -def floating_ip_get_by_fixed_address(context, fixed_address): - """Get a floating ips by fixed address""" - return IMPL.floating_ip_get_by_fixed_address(context, fixed_address) - - -def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id): - """Get a floating ips by fixed address""" - return IMPL.floating_ip_get_by_fixed_ip_id(context, fixed_ip_id) - - -def floating_ip_update(context, address, values): - """Update a floating ip by address or raise if it doesn't exist.""" - return IMPL.floating_ip_update(context, address, values) - - -def floating_ip_set_auto_assigned(context, address): - """Set auto_assigned flag to floating ip""" - return IMPL.floating_ip_set_auto_assigned(context, address) - - -def dnsdomain_list(context): - """Get a list of all zones in our database, public and private.""" - return IMPL.dnsdomain_list(context) - - -def dnsdomain_register_for_zone(context, fqdomain, zone): - """Associated a DNS domain with an availability zone""" - return IMPL.dnsdomain_register_for_zone(context, fqdomain, zone) - - -def dnsdomain_register_for_project(context, fqdomain, project): - """Associated a DNS domain with a project id""" - return IMPL.dnsdomain_register_for_project(context, fqdomain, project) - - -def dnsdomain_unregister(context, fqdomain): - """Purge associations for the specified DNS zone""" - return IMPL.dnsdomain_unregister(context, fqdomain) - - -def dnsdomain_get(context, fqdomain): - """Get the db record for the specified domain.""" - return IMPL.dnsdomain_get(context, fqdomain) - - -#################### - - def migration_update(context, id, values): """Update a migration instance.""" return IMPL.migration_update(context, id, values) @@ -407,96 +210,6 @@ def iscsi_target_create_safe(context, values): ############### - -def auth_token_destroy(context, token_id): - """Destroy an auth token.""" - return IMPL.auth_token_destroy(context, token_id) - - -def auth_token_get(context, token_hash): - """Retrieves a token given the hash representing it.""" - return IMPL.auth_token_get(context, token_hash) - - -def auth_token_update(context, token_hash, values): - """Updates a token given the hash representing it.""" - return IMPL.auth_token_update(context, token_hash, values) - - -def auth_token_create(context, token): - """Creates a new token.""" - return IMPL.auth_token_create(context, token) - - -################### - - -def quota_create(context, project_id, resource, limit): - """Create a quota for the given project and resource.""" - return IMPL.quota_create(context, project_id, resource, limit) - - -def quota_get(context, project_id, resource): - """Retrieve a quota 
or raise if it does not exist.""" - return IMPL.quota_get(context, project_id, resource) - - -def quota_get_all_by_project(context, project_id): - """Retrieve all quotas associated with a given project.""" - return IMPL.quota_get_all_by_project(context, project_id) - - -def quota_update(context, project_id, resource, limit): - """Update a quota or raise if it does not exist.""" - return IMPL.quota_update(context, project_id, resource, limit) - - -def quota_destroy(context, project_id, resource): - """Destroy the quota or raise if it does not exist.""" - return IMPL.quota_destroy(context, project_id, resource) - - -def quota_destroy_all_by_project(context, project_id): - """Destroy all quotas associated with a given project.""" - return IMPL.quota_get_all_by_project(context, project_id) - - -################### - - -def quota_class_create(context, class_name, resource, limit): - """Create a quota class for the given name and resource.""" - return IMPL.quota_class_create(context, class_name, resource, limit) - - -def quota_class_get(context, class_name, resource): - """Retrieve a quota class or raise if it does not exist.""" - return IMPL.quota_class_get(context, class_name, resource) - - -def quota_class_get_all_by_name(context, class_name): - """Retrieve all quotas associated with a given quota class.""" - return IMPL.quota_class_get_all_by_name(context, class_name) - - -def quota_class_update(context, class_name, resource, limit): - """Update a quota class or raise if it does not exist.""" - return IMPL.quota_class_update(context, class_name, resource, limit) - - -def quota_class_destroy(context, class_name, resource): - """Destroy the quota class or raise if it does not exist.""" - return IMPL.quota_class_destroy(context, class_name, resource) - - -def quota_class_destroy_all_by_name(context, class_name): - """Destroy all quotas associated with a given quota class.""" - return IMPL.quota_class_destroy_all_by_name(context, class_name) - - -################### - - def volume_allocate_iscsi_target(context, volume_id, host): """Atomically allocate a free iscsi_target from the pool.""" return IMPL.volume_allocate_iscsi_target(context, volume_id, host) @@ -611,477 +324,6 @@ def snapshot_update(context, snapshot_id, values): #################### -def block_device_mapping_create(context, values): - """Create an entry of block device mapping""" - return IMPL.block_device_mapping_create(context, values) - - -def block_device_mapping_update(context, bdm_id, values): - """Update an entry of block device mapping""" - return IMPL.block_device_mapping_update(context, bdm_id, values) - - -def block_device_mapping_update_or_create(context, values): - """Update an entry of block device mapping. 
- If not existed, create a new entry""" - return IMPL.block_device_mapping_update_or_create(context, values) - - -def block_device_mapping_get_all_by_instance(context, instance_uuid): - """Get all block device mapping belonging to a instance""" - return IMPL.block_device_mapping_get_all_by_instance(context, - instance_uuid) - - -def block_device_mapping_destroy(context, bdm_id): - """Destroy the block device mapping.""" - return IMPL.block_device_mapping_destroy(context, bdm_id) - - -def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid, - volume_id): - """Destroy the block device mapping or raise if it does not exist.""" - return IMPL.block_device_mapping_destroy_by_instance_and_volume( - context, instance_uuid, volume_id) - - -#################### - - -def security_group_get_all(context): - """Get all security groups.""" - return IMPL.security_group_get_all(context) - - -def security_group_get(context, security_group_id): - """Get security group by its id.""" - return IMPL.security_group_get(context, security_group_id) - - -def security_group_get_by_name(context, project_id, group_name): - """Returns a security group with the specified name from a project.""" - return IMPL.security_group_get_by_name(context, project_id, group_name) - - -def security_group_get_by_project(context, project_id): - """Get all security groups belonging to a project.""" - return IMPL.security_group_get_by_project(context, project_id) - - -def security_group_get_by_instance(context, instance_id): - """Get security groups to which the instance is assigned.""" - return IMPL.security_group_get_by_instance(context, instance_id) - - -def security_group_exists(context, project_id, group_name): - """Indicates if a group name exists in a project.""" - return IMPL.security_group_exists(context, project_id, group_name) - - -def security_group_in_use(context, group_id): - """Indicates if a security group is currently in use.""" - return IMPL.security_group_in_use(context, group_id) - - -def security_group_create(context, values): - """Create a new security group.""" - return IMPL.security_group_create(context, values) - - -def security_group_destroy(context, security_group_id): - """Deletes a security group.""" - return IMPL.security_group_destroy(context, security_group_id) - - -def security_group_count_by_project(context, project_id): - """Count number of security groups in a project.""" - return IMPL.security_group_count_by_project(context, project_id) - - -#################### - - -def security_group_rule_create(context, values): - """Create a new security group.""" - return IMPL.security_group_rule_create(context, values) - - -def security_group_rule_get_by_security_group(context, security_group_id): - """Get all rules for a a given security group.""" - return IMPL.security_group_rule_get_by_security_group(context, - security_group_id) - - -def security_group_rule_get_by_security_group_grantee(context, - security_group_id): - """Get all rules that grant access to the given security group.""" - return IMPL.security_group_rule_get_by_security_group_grantee(context, - security_group_id) - - -def security_group_rule_destroy(context, security_group_rule_id): - """Deletes a security group rule.""" - return IMPL.security_group_rule_destroy(context, security_group_rule_id) - - -def security_group_rule_get(context, security_group_rule_id): - """Gets a security group rule.""" - return IMPL.security_group_rule_get(context, security_group_rule_id) - - -def security_group_rule_count_by_group(context, 
security_group_id): - """Count rules in a given security group.""" - return IMPL.security_group_rule_count_by_group(context, security_group_id) - - -################### - - -def provider_fw_rule_create(context, rule): - """Add a firewall rule at the provider level (all hosts & instances).""" - return IMPL.provider_fw_rule_create(context, rule) - - -def provider_fw_rule_get_all(context): - """Get all provider-level firewall rules.""" - return IMPL.provider_fw_rule_get_all(context) - - -def provider_fw_rule_destroy(context, rule_id): - """Delete a provider firewall rule from the database.""" - return IMPL.provider_fw_rule_destroy(context, rule_id) - - -################### - - -def user_get(context, id): - """Get user by id.""" - return IMPL.user_get(context, id) - - -def user_get_by_uid(context, uid): - """Get user by uid.""" - return IMPL.user_get_by_uid(context, uid) - - -def user_get_by_access_key(context, access_key): - """Get user by access key.""" - return IMPL.user_get_by_access_key(context, access_key) - - -def user_create(context, values): - """Create a new user.""" - return IMPL.user_create(context, values) - - -def user_delete(context, id): - """Delete a user.""" - return IMPL.user_delete(context, id) - - -def user_get_all(context): - """Create a new user.""" - return IMPL.user_get_all(context) - - -def user_add_role(context, user_id, role): - """Add another global role for user.""" - return IMPL.user_add_role(context, user_id, role) - - -def user_remove_role(context, user_id, role): - """Remove global role from user.""" - return IMPL.user_remove_role(context, user_id, role) - - -def user_get_roles(context, user_id): - """Get global roles for user.""" - return IMPL.user_get_roles(context, user_id) - - -def user_add_project_role(context, user_id, project_id, role): - """Add project role for user.""" - return IMPL.user_add_project_role(context, user_id, project_id, role) - - -def user_remove_project_role(context, user_id, project_id, role): - """Remove project role from user.""" - return IMPL.user_remove_project_role(context, user_id, project_id, role) - - -def user_get_roles_for_project(context, user_id, project_id): - """Return list of roles a user holds on project.""" - return IMPL.user_get_roles_for_project(context, user_id, project_id) - - -def user_update(context, user_id, values): - """Update user.""" - return IMPL.user_update(context, user_id, values) - - -################### - - -def project_get(context, id): - """Get project by id.""" - return IMPL.project_get(context, id) - - -def project_create(context, values): - """Create a new project.""" - return IMPL.project_create(context, values) - - -def project_add_member(context, project_id, user_id): - """Add user to project.""" - return IMPL.project_add_member(context, project_id, user_id) - - -def project_get_all(context): - """Get all projects.""" - return IMPL.project_get_all(context) - - -def project_get_by_user(context, user_id): - """Get all projects of which the given user is a member.""" - return IMPL.project_get_by_user(context, user_id) - - -def project_remove_member(context, project_id, user_id): - """Remove the given user from the given project.""" - return IMPL.project_remove_member(context, project_id, user_id) - - -def project_update(context, project_id, values): - """Update Remove the given user from the given project.""" - return IMPL.project_update(context, project_id, values) - - -def project_delete(context, project_id): - """Delete project.""" - return IMPL.project_delete(context, project_id) - - -def 
project_get_networks(context, project_id, associate=True): - """Return the network associated with the project. - - If associate is true, it will attempt to associate a new - network if one is not found, otherwise it returns None. - - """ - return IMPL.project_get_networks(context, project_id, associate) - - -################### - - -def console_pool_create(context, values): - """Create console pool.""" - return IMPL.console_pool_create(context, values) - - -def console_pool_get(context, pool_id): - """Get a console pool.""" - return IMPL.console_pool_get(context, pool_id) - - -def console_pool_get_by_host_type(context, compute_host, proxy_host, - console_type): - """Fetch a console pool for a given proxy host, compute host, and type.""" - return IMPL.console_pool_get_by_host_type(context, - compute_host, - proxy_host, - console_type) - - -def console_pool_get_all_by_host_type(context, host, console_type): - """Fetch all pools for given proxy host and type.""" - return IMPL.console_pool_get_all_by_host_type(context, - host, - console_type) - - -def console_create(context, values): - """Create a console.""" - return IMPL.console_create(context, values) - - -def console_delete(context, console_id): - """Delete a console.""" - return IMPL.console_delete(context, console_id) - - -def console_get_by_pool_instance(context, pool_id, instance_id): - """Get console entry for a given instance and pool.""" - return IMPL.console_get_by_pool_instance(context, pool_id, instance_id) - - -def console_get_all_by_instance(context, instance_id): - """Get consoles for a given instance.""" - return IMPL.console_get_all_by_instance(context, instance_id) - - -def console_get(context, console_id, instance_id=None): - """Get a specific console (possibly on a given instance).""" - return IMPL.console_get(context, console_id, instance_id) - - - ################## - - -def instance_type_create(context, values): - """Create a new instance type.""" - return IMPL.instance_type_create(context, values) - - -def instance_type_get_all(context, inactive=False, filters=None): - """Get all instance types.""" - return IMPL.instance_type_get_all( - context, inactive=inactive, filters=filters) - - -def instance_type_get(context, id): - """Get instance type by id.""" - return IMPL.instance_type_get(context, id) - - -def instance_type_get_by_name(context, name): - """Get instance type by name.""" - return IMPL.instance_type_get_by_name(context, name) - - -def instance_type_get_by_flavor_id(context, id): - """Get instance type by name.""" - return IMPL.instance_type_get_by_flavor_id(context, id) - - -def instance_type_destroy(context, name): - """Delete a instance type.""" - return IMPL.instance_type_destroy(context, name) - - -#################### - - -def cell_create(context, values): - """Create a new child Cell entry.""" - return IMPL.cell_create(context, values) - - -def cell_update(context, cell_id, values): - """Update a child Cell entry.""" - return IMPL.cell_update(context, cell_id, values) - - -def cell_delete(context, cell_id): - """Delete a child Cell.""" - return IMPL.cell_delete(context, cell_id) - - -def cell_get(context, cell_id): - """Get a specific child Cell.""" - return IMPL.cell_get(context, cell_id) - - -def cell_get_all(context): - """Get all child Cells.""" - return IMPL.cell_get_all(context) - - -#################### - - -def instance_metadata_get(context, instance_id): - """Get all metadata for an instance.""" - return IMPL.instance_metadata_get(context, instance_id) - - -def 
instance_metadata_delete(context, instance_id, key): - """Delete the given metadata item.""" - IMPL.instance_metadata_delete(context, instance_id, key) - - -def instance_metadata_update(context, instance_id, metadata, delete): - """Update metadata if it exists, otherwise create it.""" - IMPL.instance_metadata_update(context, instance_id, metadata, delete) - - -#################### - - -def agent_build_create(context, values): - """Create a new agent build entry.""" - return IMPL.agent_build_create(context, values) - - -def agent_build_get_by_triple(context, hypervisor, os, architecture): - """Get agent build by hypervisor/OS/architecture triple.""" - return IMPL.agent_build_get_by_triple(context, hypervisor, os, - architecture) - - -def agent_build_get_all(context): - """Get all agent builds.""" - return IMPL.agent_build_get_all(context) - - -def agent_build_destroy(context, agent_update_id): - """Destroy agent build entry.""" - IMPL.agent_build_destroy(context, agent_update_id) - - -def agent_build_update(context, agent_build_id, values): - """Update agent build entry.""" - IMPL.agent_build_update(context, agent_build_id, values) - - -#################### - - -def bw_usage_get_by_uuids(context, uuids, start_period): - """Return bw usages for instance(s) in a given audit period.""" - return IMPL.bw_usage_get_by_uuids(context, uuids, start_period) - - -def bw_usage_update(context, - uuid, - mac, - start_period, - bw_in, bw_out): - """Update cached bw usage for an instance and network - Creates new record if needed.""" - return IMPL.bw_usage_update(context, - uuid, - mac, - start_period, - bw_in, bw_out) - - -#################### - - -def instance_type_extra_specs_get(context, instance_type_id): - """Get all extra specs for an instance type.""" - return IMPL.instance_type_extra_specs_get(context, instance_type_id) - - -def instance_type_extra_specs_delete(context, instance_type_id, key): - """Delete the given extra specs item.""" - IMPL.instance_type_extra_specs_delete(context, instance_type_id, key) - - -def instance_type_extra_specs_update_or_create(context, instance_type_id, - extra_specs): - """Create or update instance type extra specs. 
This adds or modifies the - key/value pairs specified in the extra specs dict argument""" - IMPL.instance_type_extra_specs_update_or_create(context, instance_type_id, - extra_specs) - - -################## - - def volume_metadata_get(context, volume_id): """Get all metadata for a volume.""" return IMPL.volume_metadata_get(context, volume_id) @@ -1149,24 +391,6 @@ def volume_type_extra_specs_update_or_create(context, volume_type_id, ################### -def s3_image_get(context, image_id): - """Find local s3 image represented by the provided id""" - return IMPL.s3_image_get(context, image_id) - - -def s3_image_get_by_uuid(context, image_uuid): - """Find local s3 image represented by the provided uuid""" - return IMPL.s3_image_get_by_uuid(context, image_uuid) - - -def s3_image_create(context, image_uuid): - """Create local s3 image represented by provided uuid""" - return IMPL.s3_image_create(context, image_uuid) - - -#################### - - def sm_backend_conf_create(context, values): """Create a new SM Backend Config entry.""" return IMPL.sm_backend_conf_create(context, values) @@ -1252,79 +476,149 @@ def sm_volume_get_all(context): """Get all child Zones.""" return IMPL.sm_volume_get_all(context) +################### -#################### + +def quota_create(context, project_id, resource, limit): + """Create a quota for the given project and resource.""" + return IMPL.quota_create(context, project_id, resource, limit) + + +def quota_get(context, project_id, resource): + """Retrieve a quota or raise if it does not exist.""" + return IMPL.quota_get(context, project_id, resource) + + +def quota_get_all_by_project(context, project_id): + """Retrieve all quotas associated with a given project.""" + return IMPL.quota_get_all_by_project(context, project_id) + + +def quota_update(context, project_id, resource, limit): + """Update a quota or raise if it does not exist.""" + return IMPL.quota_update(context, project_id, resource, limit) + + +def quota_destroy(context, project_id, resource): + """Destroy the quota or raise if it does not exist.""" + return IMPL.quota_destroy(context, project_id, resource) + + +################### + + +def quota_class_create(context, class_name, resource, limit): + """Create a quota class for the given name and resource.""" + return IMPL.quota_class_create(context, class_name, resource, limit) + + +def quota_class_get(context, class_name, resource): + """Retrieve a quota class or raise if it does not exist.""" + return IMPL.quota_class_get(context, class_name, resource) + + +def quota_class_get_all_by_name(context, class_name): + """Retrieve all quotas associated with a given quota class.""" + return IMPL.quota_class_get_all_by_name(context, class_name) -def aggregate_create(context, values, metadata=None): - """Create a new aggregate with metadata.""" - return IMPL.aggregate_create(context, values, metadata) +def quota_class_update(context, class_name, resource, limit): + """Update a quota class or raise if it does not exist.""" + return IMPL.quota_class_update(context, class_name, resource, limit) -def aggregate_get(context, aggregate_id): - """Get a specific aggregate by id.""" - return IMPL.aggregate_get(context, aggregate_id) +def quota_class_destroy(context, class_name, resource): + """Destroy the quota class or raise if it does not exist.""" + return IMPL.quota_class_destroy(context, class_name, resource) -def aggregate_get_by_host(context, host): - """Get a specific aggregate by host""" - return IMPL.aggregate_get_by_host(context, host) +def 
quota_class_destroy_all_by_name(context, class_name): + """Destroy all quotas associated with a given quota class.""" + return IMPL.quota_class_destroy_all_by_name(context, class_name) -def aggregate_update(context, aggregate_id, values): - """Update the attributes of an aggregates. If values contains a metadata - key, it updates the aggregate metadata too.""" - return IMPL.aggregate_update(context, aggregate_id, values) +################### -def aggregate_delete(context, aggregate_id): - """Delete an aggregate.""" - return IMPL.aggregate_delete(context, aggregate_id) +def quota_usage_create(context, project_id, resource, in_use, reserved, + until_refresh): + """Create a quota usage for the given project and resource.""" + return IMPL.quota_usage_create(context, project_id, resource, + in_use, reserved, until_refresh) -def aggregate_get_all(context): - """Get all aggregates.""" - return IMPL.aggregate_get_all(context) +def quota_usage_get(context, project_id, resource): + """Retrieve a quota usage or raise if it does not exist.""" + return IMPL.quota_usage_get(context, project_id, resource) -def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False): - """Add/update metadata. If set_delete=True, it adds only.""" - IMPL.aggregate_metadata_add(context, aggregate_id, metadata, set_delete) +def quota_usage_get_all_by_project(context, project_id): + """Retrieve all usage associated with a given resource.""" + return IMPL.quota_usage_get_all_by_project(context, project_id) -def aggregate_metadata_get(context, aggregate_id): - """Get metadata for the specified aggregate.""" - return IMPL.aggregate_metadata_get(context, aggregate_id) +def quota_usage_update(context, class_name, resource, in_use, reserved, + until_refresh): + """Update a quota usage or raise if it does not exist.""" + return IMPL.quota_usage_update(context, project_id, resource, + in_use, reserved, until_refresh) -def aggregate_metadata_delete(context, aggregate_id, key): - """Delete the given metadata key.""" - IMPL.aggregate_metadata_delete(context, aggregate_id, key) +def quota_usage_destroy(context, project_id, resource): + """Destroy the quota usage or raise if it does not exist.""" + return IMPL.quota_usage_destroy(context, project_id, resource) -def aggregate_host_add(context, aggregate_id, host): - """Add host to the aggregate.""" - IMPL.aggregate_host_add(context, aggregate_id, host) +################### -def aggregate_host_get_all(context, aggregate_id): - """Get hosts for the specified aggregate.""" - return IMPL.aggregate_host_get_all(context, aggregate_id) +def reservation_create(context, uuid, usage, project_id, resource, delta, + expire): + """Create a reservation for the given project and resource.""" + return IMPL.reservation_create(context, uuid, usage, project_id, + resource, delta, expire) -def aggregate_host_delete(context, aggregate_id, host): - """Delete the given host from the aggregate.""" - IMPL.aggregate_host_delete(context, aggregate_id, host) +def reservation_get(context, uuid): + """Retrieve a reservation or raise if it does not exist.""" + return IMPL.reservation_get(context, uuid) -#################### +def reservation_get_all_by_project(context, project_id): + """Retrieve all reservations associated with a given project.""" + return IMPL.reservation_get_all_by_project(context, project_id) -def instance_fault_create(context, values): - """Create a new Instance Fault.""" - return IMPL.instance_fault_create(context, values) +def reservation_destroy(context, uuid): + """Destroy the 
reservation or raise if it does not exist.""" + return IMPL.reservation_destroy(context, uuid) + + +################### + + +def quota_reserve(context, resources, quotas, deltas, expire, + until_refresh, max_age): + """Check quotas and create appropriate reservations.""" + return IMPL.quota_reserve(context, resources, quotas, deltas, expire, + until_refresh, max_age) + + +def reservation_commit(context, reservations): + """Commit quota reservations.""" + return IMPL.reservation_commit(context, reservations) + + +def reservation_rollback(context, reservations): + """Roll back quota reservations.""" + return IMPL.reservation_rollback(context, reservations) + + +def quota_destroy_all_by_project(context, project_id): + """Destroy all quotas associated with a given project.""" + return IMPL.quota_get_all_by_project(context, project_id) -def instance_fault_get_by_instance_uuids(context, instance_uuids): - """Get all instance faults for the provided instance_uuids.""" - return IMPL.instance_fault_get_by_instance_uuids(context, instance_uuids) +def reservation_expire(context): + """Roll back any expired reservations.""" + return IMPL.reservation_expire(context) diff --git a/cinder/db/sqlalchemy/api.py b/cinder/db/sqlalchemy/api.py index 05dd90fcf..714f7429e 100644 --- a/cinder/db/sqlalchemy/api.py +++ b/cinder/db/sqlalchemy/api.py @@ -20,7 +20,6 @@ """Implementation of SQLAlchemy backend.""" import datetime -import functools import warnings from cinder import db @@ -28,15 +27,12 @@ from cinder import exception from cinder import flags from cinder import utils from cinder import log as logging -from cinder.compute import aggregate_states from cinder.db.sqlalchemy import models from cinder.db.sqlalchemy.session import get_session +from cinder.openstack.common import timeutils from sqlalchemy.exc import IntegrityError from sqlalchemy.orm import joinedload -from sqlalchemy.orm import joinedload_all from sqlalchemy.sql import func -from sqlalchemy.sql.expression import asc -from sqlalchemy.sql.expression import desc from sqlalchemy.sql.expression import literal_column FLAGS = flags.FLAGS @@ -137,20 +133,6 @@ def require_volume_exists(f): return wrapper -def require_aggregate_exists(f): - """Decorator to require the specified aggregate to exist. - - Requires the wrapped function to use context and aggregate_id as - their first two arguments. - """ - - @functools.wraps(f) - def wrapper(context, aggregate_id, *args, **kwargs): - db.aggregate_get(context, aggregate_id) - return f(context, aggregate_id, *args, **kwargs) - return wrapper - - def model_query(context, *args, **kwargs): """Query helper that accounts for context's `read_deleted` field. 
@@ -410,152 +392,6 @@ def iscsi_target_create_safe(context, values): ################### -@require_context -def quota_get(context, project_id, resource, session=None): - result = model_query(context, models.Quota, session=session, - read_deleted="no").\ - filter_by(project_id=project_id).\ - filter_by(resource=resource).\ - first() - - if not result: - raise exception.ProjectQuotaNotFound(project_id=project_id) - - return result - - -@require_context -def quota_get_all_by_project(context, project_id): - authorize_project_context(context, project_id) - - rows = model_query(context, models.Quota, read_deleted="no").\ - filter_by(project_id=project_id).\ - all() - - result = {'project_id': project_id} - for row in rows: - result[row.resource] = row.hard_limit - - return result - - -@require_admin_context -def quota_create(context, project_id, resource, limit): - quota_ref = models.Quota() - quota_ref.project_id = project_id - quota_ref.resource = resource - quota_ref.hard_limit = limit - quota_ref.save() - return quota_ref - - -@require_admin_context -def quota_update(context, project_id, resource, limit): - session = get_session() - with session.begin(): - quota_ref = quota_get(context, project_id, resource, session=session) - quota_ref.hard_limit = limit - quota_ref.save(session=session) - - -@require_admin_context -def quota_destroy(context, project_id, resource): - session = get_session() - with session.begin(): - quota_ref = quota_get(context, project_id, resource, session=session) - quota_ref.delete(session=session) - - -@require_admin_context -def quota_destroy_all_by_project(context, project_id): - session = get_session() - with session.begin(): - quotas = model_query(context, models.Quota, session=session, - read_deleted="no").\ - filter_by(project_id=project_id).\ - all() - - for quota_ref in quotas: - quota_ref.delete(session=session) - - -################### - - -@require_context -def quota_class_get(context, class_name, resource, session=None): - result = model_query(context, models.QuotaClass, session=session, - read_deleted="no").\ - filter_by(class_name=class_name).\ - filter_by(resource=resource).\ - first() - - if not result: - raise exception.QuotaClassNotFound(class_name=class_name) - - return result - - -@require_context -def quota_class_get_all_by_name(context, class_name): - authorize_quota_class_context(context, class_name) - - rows = model_query(context, models.QuotaClass, read_deleted="no").\ - filter_by(class_name=class_name).\ - all() - - result = {'class_name': class_name} - for row in rows: - result[row.resource] = row.hard_limit - - return result - - -@require_admin_context -def quota_class_create(context, class_name, resource, limit): - quota_class_ref = models.QuotaClass() - quota_class_ref.class_name = class_name - quota_class_ref.resource = resource - quota_class_ref.hard_limit = limit - quota_class_ref.save() - return quota_class_ref - - -@require_admin_context -def quota_class_update(context, class_name, resource, limit): - session = get_session() - with session.begin(): - quota_class_ref = quota_class_get(context, class_name, resource, - session=session) - quota_class_ref.hard_limit = limit - quota_class_ref.save(session=session) - - -@require_admin_context -def quota_class_destroy(context, class_name, resource): - session = get_session() - with session.begin(): - quota_class_ref = quota_class_get(context, class_name, resource, - session=session) - quota_class_ref.delete(session=session) - - -@require_admin_context -def 
quota_class_destroy_all_by_name(context, class_name): - session = get_session() - with session.begin(): - quota_classes = model_query(context, models.QuotaClass, - session=session, read_deleted="no").\ - filter_by(class_name=class_name).\ - all() - - for quota_class_ref in quota_classes: - quota_class_ref.delete(session=session) - - -################### - - @require_admin_context def volume_allocate_iscsi_target(context, volume_id, host): session = get_session() @@ -968,8 +804,6 @@ def volume_type_create(context, values): except exception.VolumeTypeNotFoundByName: pass try: - specs = values.get('extra_specs') - values['extra_specs'] = _metadata_refs(values.get('extra_specs'), models.VolumeTypeExtraSpecs) volume_type_ref = models.VolumeTypes() @@ -1170,7 +1004,6 @@ def sm_backend_conf_get(context, sm_backend_id): @require_admin_context def sm_backend_conf_get_by_sr(context, sr_uuid): - session = get_session() return model_query(context, models.SMBackendConf, read_deleted="yes").\ filter_by(sr_uuid=sr_uuid).\ first() @@ -1273,237 +1106,223 @@ def sm_volume_get_all(context): return model_query(context, models.SMVolume, read_deleted="yes").all() -################ +############################### -def _aggregate_get_query(context, model_class, id_field, id, - session=None, read_deleted=None): - return model_query(context, model_class, session=session, - read_deleted=read_deleted).filter(id_field == id) +@require_context +def quota_get(context, project_id, resource, session=None): + result = model_query(context, models.Quota, session=session, + read_deleted="no").\ + filter_by(project_id=project_id).\ + filter_by(resource=resource).\ + first() + if not result: + raise exception.ProjectQuotaNotFound(project_id=project_id) -@require_admin_context -def aggregate_create(context, values, metadata=None): - session = get_session() - aggregate = _aggregate_get_query(context, - models.Aggregate, - models.Aggregate.name, - values['name'], - session=session, - read_deleted='yes').first() - values.setdefault('operational_state', aggregate_states.CREATED) - if not aggregate: - aggregate = models.Aggregate() - aggregate.update(values) - aggregate.save(session=session) - elif aggregate.deleted: - values['deleted'] = False - values['deleted_at'] = None - aggregate.update(values) - aggregate.save(session=session) - else: - raise exception.AggregateNameExists(aggregate_name=values['name']) - if metadata: - aggregate_metadata_add(context, aggregate.id, metadata) - return aggregate + return result -@require_admin_context -def aggregate_get(context, aggregate_id): - aggregate = _aggregate_get_query(context, - models.Aggregate, - models.Aggregate.id, - aggregate_id).first() +@require_context +def quota_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) - if not aggregate: - raise exception.AggregateNotFound(aggregate_id=aggregate_id) + rows = model_query(context, models.Quota, read_deleted="no").\ + filter_by(project_id=project_id).\ + all() - return aggregate + result = {'project_id': project_id} + for row in rows: + result[row.resource] = row.hard_limit + + return result @require_admin_context -def aggregate_get_by_host(context, host): - aggregate_host = _aggregate_get_query(context, - models.AggregateHost, - models.AggregateHost.host, - host).first() +def quota_create(context, project_id, resource, limit): + quota_ref = models.Quota() + quota_ref.project_id = project_id + quota_ref.resource = resource + quota_ref.hard_limit = limit + quota_ref.save() + return quota_ref - if 
not aggregate_host: - raise exception.AggregateHostNotFound(host=host) - return aggregate_get(context, aggregate_host.aggregate_id) +@require_admin_context +def quota_update(context, project_id, resource, limit): + session = get_session() + with session.begin(): + quota_ref = quota_get(context, project_id, resource, session=session) + quota_ref.hard_limit = limit + quota_ref.save(session=session) @require_admin_context -def aggregate_update(context, aggregate_id, values): +def quota_destroy(context, project_id, resource): session = get_session() - aggregate = _aggregate_get_query(context, - models.Aggregate, - models.Aggregate.id, - aggregate_id, - session=session).first() - if aggregate: - metadata = values.get('metadata') - if metadata is not None: - aggregate_metadata_add(context, - aggregate_id, - values.pop('metadata'), - set_delete=True) - with session.begin(): - aggregate.update(values) - aggregate.save(session=session) - values['metadata'] = metadata - return aggregate - else: - raise exception.AggregateNotFound(aggregate_id=aggregate_id) + with session.begin(): + quota_ref = quota_get(context, project_id, resource, session=session) + quota_ref.delete(session=session) @require_admin_context -def aggregate_delete(context, aggregate_id): - query = _aggregate_get_query(context, - models.Aggregate, - models.Aggregate.id, - aggregate_id) - if query.first(): - query.update({'deleted': True, - 'deleted_at': utils.utcnow(), - 'operational_state': aggregate_states.DISMISSED, - 'updated_at': literal_column('updated_at')}) - else: - raise exception.AggregateNotFound(aggregate_id=aggregate_id) +def quota_destroy_all_by_project(context, project_id): + session = get_session() + with session.begin(): + quotas = model_query(context, models.Quota, session=session, + read_deleted="no").\ + filter_by(project_id=project_id).\ + all() + for quota_ref in quotas: + quota_ref.delete(session=session) -@require_admin_context -def aggregate_get_all(context): - return model_query(context, models.Aggregate).all() +################### -@require_admin_context -@require_aggregate_exists -def aggregate_metadata_get(context, aggregate_id): - rows = model_query(context, - models.AggregateMetadata).\ - filter_by(aggregate_id=aggregate_id).all() - return dict([(r['key'], r['value']) for r in rows]) +@require_context +def quota_class_get(context, class_name, resource, session=None): + result = model_query(context, models.QuotaClass, session=session, + read_deleted="no").\ + filter_by(class_name=class_name).\ + filter_by(resource=resource).\ + first() + + if not result: + raise exception.QuotaClassNotFound(class_name=class_name) + + return result + + +@require_context +def quota_class_get_all_by_name(context, class_name): + authorize_quota_class_context(context, class_name) + + rows = model_query(context, models.QuotaClass, read_deleted="no").\ + filter_by(class_name=class_name).\ + all() + + result = {'class_name': class_name} + for row in rows: + result[row.resource] = row.hard_limit + + return result @require_admin_context -@require_aggregate_exists -def aggregate_metadata_delete(context, aggregate_id, key): - query = _aggregate_get_query(context, - models.AggregateMetadata, - models.AggregateMetadata.aggregate_id, - aggregate_id).\ - filter_by(key=key) - if query.first(): - query.update({'deleted': True, - 'deleted_at': utils.utcnow(), - 'updated_at': literal_column('updated_at')}) - else: - raise exception.AggregateMetadataNotFound(aggregate_id=aggregate_id, - metadata_key=key) +def quota_class_create(context, 
class_name, resource, limit): + quota_class_ref = models.QuotaClass() + quota_class_ref.class_name = class_name + quota_class_ref.resource = resource + quota_class_ref.hard_limit = limit + quota_class_ref.save() + return quota_class_ref @require_admin_context -@require_aggregate_exists -def aggregate_metadata_get_item(context, aggregate_id, key, session=None): - result = _aggregate_get_query(context, - models.AggregateMetadata, - models.AggregateMetadata.aggregate_id, - aggregate_id, session=session, - read_deleted='yes').\ - filter_by(key=key).first() +def quota_class_update(context, class_name, resource, limit): + session = get_session() + with session.begin(): + quota_class_ref = quota_class_get(context, class_name, resource, + session=session) + quota_class_ref.hard_limit = limit + quota_class_ref.save(session=session) - if not result: - raise exception.AggregateMetadataNotFound(metadata_key=key, - aggregate_id=aggregate_id) - return result +@require_admin_context +def quota_class_destroy(context, class_name, resource): + session = get_session() + with session.begin(): + quota_class_ref = quota_class_get(context, class_name, resource, + session=session) + quota_class_ref.delete(session=session) @require_admin_context -@require_aggregate_exists -def aggregate_metadata_add(context, aggregate_id, metadata, set_delete=False): +def quota_class_destroy_all_by_name(context, class_name): session = get_session() + with session.begin(): + quota_classes = model_query(context, models.QuotaClass, + session=session, read_deleted="no").\ + filter_by(class_name=class_name).\ + all() - if set_delete: - original_metadata = aggregate_metadata_get(context, aggregate_id) - for meta_key, meta_value in original_metadata.iteritems(): - if meta_key not in metadata: - meta_ref = aggregate_metadata_get_item(context, aggregate_id, - meta_key, session) - meta_ref.update({'deleted': True}) - meta_ref.save(session=session) + for quota_class_ref in quota_classes: + quota_class_ref.delete(session=session) - meta_ref = None - for meta_key, meta_value in metadata.iteritems(): - item = {"value": meta_value} - try: - meta_ref = aggregate_metadata_get_item(context, aggregate_id, - meta_key, session) - if meta_ref.deleted: - item.update({'deleted': False, 'deleted_at': None}) - except exception.AggregateMetadataNotFound: - meta_ref = models.AggregateMetadata() - item.update({"key": meta_key, "aggregate_id": aggregate_id}) +@require_context +def quota_usage_get(context, project_id, resource, session=None): + result = model_query(context, models.QuotaUsage, session=session, + read_deleted="no").\ + filter_by(project_id=project_id).\ + filter_by(resource=resource).\ + first() - meta_ref.update(item) - meta_ref.save(session=session) + if not result: + raise exception.QuotaUsageNotFound(project_id=project_id) - return metadata + return result + + +@require_context +def quota_usage_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) + + rows = model_query(context, models.QuotaUsage, read_deleted="no").\ + filter_by(project_id=project_id).\ + all() + + result = {'project_id': project_id} + for row in rows: + result[row.resource] = dict(in_use=row.in_use, reserved=row.reserved) + + return result @require_admin_context -@require_aggregate_exists -def aggregate_host_get_all(context, aggregate_id): - rows = model_query(context, - models.AggregateHost).\ - filter_by(aggregate_id=aggregate_id).all() +def quota_usage_create(context, project_id, resource, in_use, reserved, + until_refresh, 
session=None, save=True): + quota_usage_ref = models.QuotaUsage() + quota_usage_ref.project_id = project_id + quota_usage_ref.resource = resource + quota_usage_ref.in_use = in_use + quota_usage_ref.reserved = reserved + quota_usage_ref.until_refresh = until_refresh + + # Allow us to hold the save operation until later; keeps the + # transaction in quota_reserve() from breaking too early + if save: + quota_usage_ref.save(session=session) - return [r.host for r in rows] + return quota_usage_ref @require_admin_context -@require_aggregate_exists -def aggregate_host_delete(context, aggregate_id, host): - query = _aggregate_get_query(context, - models.AggregateHost, - models.AggregateHost.aggregate_id, - aggregate_id).filter_by(host=host) - if query.first(): - query.update({'deleted': True, - 'deleted_at': utils.utcnow(), - 'updated_at': literal_column('updated_at')}) +def quota_usage_update(context, project_id, resource, in_use, reserved, + until_refresh, session=None): + def do_update(session): + quota_usage_ref = quota_usage_get(context, project_id, resource, + session=session) + quota_usage_ref.in_use = in_use + quota_usage_ref.reserved = reserved + quota_usage_ref.until_refresh = until_refresh + quota_usage_ref.save(session=session) + + if session: + # Assume caller started a transaction + do_update(session) else: - raise exception.AggregateHostNotFound(aggregate_id=aggregate_id, - host=host) + session = get_session() + with session.begin(): + do_update(session) @require_admin_context -@require_aggregate_exists -def aggregate_host_add(context, aggregate_id, host): +def quota_usage_destroy(context, project_id, resource): session = get_session() - host_ref = _aggregate_get_query(context, - models.AggregateHost, - models.AggregateHost.aggregate_id, - aggregate_id, - session=session, - read_deleted='yes').\ - filter_by(host=host).first() - if not host_ref: - try: - host_ref = models.AggregateHost() - values = {"host": host, "aggregate_id": aggregate_id, } - host_ref.update(values) - host_ref.save(session=session) - except exception.DBError: - raise exception.AggregateHostConflict(host=host) - elif host_ref.deleted: - host_ref.update({'deleted': False, 'deleted_at': None}) - host_ref.save(session=session) - else: - raise exception.AggregateHostExists(host=host, - aggregate_id=aggregate_id) - return host_ref + with session.begin(): + quota_usage_ref = quota_usage_get(context, project_id, resource, + session=session) + quota_usage_ref.delete(session=session) diff --git a/cinder/openstack/common/timeutils.py b/cinder/openstack/common/timeutils.py new file mode 100644 index 000000000..5eeaf70aa --- /dev/null +++ b/cinder/openstack/common/timeutils.py @@ -0,0 +1,109 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Time related utilities and helper functions. 
+""" + +import calendar +import datetime +import time + +import iso8601 + + +TIME_FORMAT = "%Y-%m-%dT%H:%M:%S" +PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f" + + +def isotime(at=None): + """Stringify time in ISO 8601 format""" + if not at: + at = utcnow() + str = at.strftime(TIME_FORMAT) + tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC' + str += ('Z' if tz == 'UTC' else tz) + return str + + +def parse_isotime(timestr): + """Parse time from ISO 8601 format""" + try: + return iso8601.parse_date(timestr) + except iso8601.ParseError as e: + raise ValueError(e.message) + except TypeError as e: + raise ValueError(e.message) + + +def strtime(at=None, fmt=PERFECT_TIME_FORMAT): + """Returns formatted utcnow.""" + if not at: + at = utcnow() + return at.strftime(fmt) + + +def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT): + """Turn a formatted time back into a datetime.""" + return datetime.datetime.strptime(timestr, fmt) + + +def normalize_time(timestamp): + """Normalize time in arbitrary timezone to UTC""" + offset = timestamp.utcoffset() + return timestamp.replace(tzinfo=None) - offset if offset else timestamp + + +def is_older_than(before, seconds): + """Return True if before is older than seconds.""" + return utcnow() - before > datetime.timedelta(seconds=seconds) + + +def utcnow_ts(): + """Timestamp version of our utcnow function.""" + return calendar.timegm(utcnow().timetuple()) + + +def utcnow(): + """Overridable version of utils.utcnow.""" + if utcnow.override_time: + return utcnow.override_time + return datetime.datetime.utcnow() + + +utcnow.override_time = None + + +def set_time_override(override_time=datetime.datetime.utcnow()): + """Override utils.utcnow to return a constant time.""" + utcnow.override_time = override_time + + +def advance_time_delta(timedelta): + """Advance overriden time using a datetime.timedelta.""" + assert(not utcnow.override_time is None) + utcnow.override_time += timedelta + + +def advance_time_seconds(seconds): + """Advance overriden time by seconds.""" + advance_time_delta(datetime.timedelta(0, seconds)) + + +def clear_time_override(): + """Remove the overridden time.""" + utcnow.override_time = None diff --git a/cinder/tests/notifier/test_capacity_notifier.py b/cinder/tests/notifier/test_capacity_notifier.py deleted file mode 100644 index 298de5f60..000000000 --- a/cinder/tests/notifier/test_capacity_notifier.py +++ /dev/null @@ -1,59 +0,0 @@ -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import cinder.db.api -from cinder.notifier import capacity_notifier as cn -from cinder import test -from cinder import utils - - -class CapacityNotifierTestCase(test.TestCase): - """Test case for the Capacity updating notifier.""" - - def _make_msg(self, host, event): - usage_info = dict(memory_mb=123, disk_gb=456) - payload = utils.to_primitive(usage_info, convert_instances=True) - return dict( - publisher_id="compute.%s" % host, - event_type="compute.instance.%s" % event, - payload=payload - ) - - def test_event_type(self): - msg = self._make_msg("myhost", "mymethod") - msg['event_type'] = 'random' - self.assertFalse(cn.notify(msg)) - - def test_bad_event_suffix(self): - msg = self._make_msg("myhost", "mymethod.badsuffix") - self.assertFalse(cn.notify(msg)) - - def test_bad_publisher_id(self): - msg = self._make_msg("myhost", "mymethod.start") - msg['publisher_id'] = 'badpublisher' - self.assertFalse(cn.notify(msg)) - - def test_update_called(self): - def _verify_called(host, context, free_ram_mb_delta, - free_disk_gb_delta, work_delta, vm_delta): - self.assertEquals(free_ram_mb_delta, 123) - self.assertEquals(free_disk_gb_delta, 456) - self.assertEquals(vm_delta, -1) - self.assertEquals(work_delta, -1) - - self.stubs.Set(cinder.db.api, "compute_node_utilization_update", - _verify_called) - msg = self._make_msg("myhost", "delete.end") - self.assertTrue(cn.notify(msg)) diff --git a/openstack-common.conf b/openstack-common.conf index d63233f9d..e61aff5b2 100644 --- a/openstack-common.conf +++ b/openstack-common.conf @@ -1,7 +1,7 @@ [DEFAULT] # The list of modules to copy from openstack-common -modules=cfg,exception,local,importutils,iniparser +modules=cfg,exception,local,importutils,iniparser,timeutils # The base module to hold the copy of openstack.common base=cinder -- 2.45.2
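Illustrative usage (not part of the patch): a minimal sketch of how a caller might drive the quota-reservation API that this change declares in cinder/db/api.py (quota_reserve, reservation_commit, reservation_rollback), together with the new timeutils module. It assumes an admin context and pre-built resources/quotas/deltas dictionaries, and that a backend implementation of the reservation calls is in place; every name other than the db and timeutils calls themselves is a hypothetical placeholder, not something defined by this patch.

    # Sketch only: exercises the reservation API declared in cinder/db/api.py
    # by this patch. The surrounding scaffolding (function name, arguments,
    # one-hour expiry) is assumed for illustration.
    import datetime

    from cinder import context as cinder_context
    from cinder import db
    from cinder.openstack.common import timeutils


    def reserve_volume_quota(resources, quotas, volumes_requested):
        """Reserve quota for a volume-create request, then commit or roll back."""
        ctxt = cinder_context.get_admin_context()
        deltas = {'volumes': volumes_requested}

        # Reservations expire if never committed; one hour is an assumed value.
        expire = timeutils.utcnow() + datetime.timedelta(hours=1)

        reservations = db.quota_reserve(ctxt, resources, quotas, deltas,
                                        expire, until_refresh=None, max_age=0)
        try:
            # ... perform the actual volume creation here ...
            db.reservation_commit(ctxt, reservations)
        except Exception:
            # On any failure, release the reserved quota.
            db.reservation_rollback(ctxt, reservations)
            raise

In a test, the expiry path could be reached deterministically with the helpers from the new cinder/openstack/common/timeutils.py, e.g. timeutils.set_time_override() followed by timeutils.advance_time_seconds(3600) and a call to db.reservation_expire(ctxt), then timeutils.clear_time_override().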